From c0df2e6c334c0885c112187320172e6cd10b51b7 Mon Sep 17 00:00:00 2001
From: ClementTsang
Date: Sun, 19 Jan 2020 20:57:05 -0500
Subject: Only generate regexes during regex mode

---
 src/app.rs | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

(limited to 'src')

diff --git a/src/app.rs b/src/app.rs
index 99d4641b..244f3872 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -233,6 +233,17 @@ impl App {
 		if !self.is_in_dialog() && self.is_searching() {
 			if let ApplicationPosition::ProcessSearch = self.current_application_position {
 				self.use_simple = !self.use_simple;
+
+				// Update to latest (when simple is on this is not updated)
+				if !self.use_simple {
+					self.current_regex = if self.current_search_query.is_empty() {
+						BASE_REGEX.clone()
+					} else {
+						regex::Regex::new(&(self.current_search_query))
+					};
+				}
+
+				// Force update to process display in GUI
 				self.update_process_gui = true;
 			}
 		}
@@ -266,12 +277,13 @@ impl App {
 				self.current_search_query
 					.remove(self.current_cursor_position);
 
-				// TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex!
-				self.current_regex = if self.current_search_query.is_empty() {
-					BASE_REGEX.clone()
-				} else {
-					regex::Regex::new(&(self.current_search_query))
-				};
+				if !self.use_simple {
+					self.current_regex = if self.current_search_query.is_empty() {
+						BASE_REGEX.clone()
+					} else {
+						regex::Regex::new(&(self.current_search_query))
+					};
+				}
 				self.update_process_gui = true;
 			}
 		}
@@ -352,12 +364,13 @@ impl App {
 					.insert(self.current_cursor_position, caught_char);
 				self.current_cursor_position += 1;
 
-				// TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex!
-				self.current_regex = if self.current_search_query.is_empty() {
-					BASE_REGEX.clone()
-				} else {
-					regex::Regex::new(&(self.current_search_query))
-				};
+				if !self.use_simple {
+					self.current_regex = if self.current_search_query.is_empty() {
+						BASE_REGEX.clone()
+					} else {
+						regex::Regex::new(&(self.current_search_query))
+					};
+				}
 				self.update_process_gui = true;
 			} else {
 				match caught_char {
-- 
cgit v1.2.3


From 840b0cccc8549efd7ffb6ddde8bb5d2319fe6665 Mon Sep 17 00:00:00 2001
From: ClementTsang
Date: Mon, 20 Jan 2020 01:28:30 -0500
Subject: Slightly optimized how networking is... I think.
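This patch and the ones after it rework network collection around a rate calculation: the collector keeps the previous cumulative rx/tx byte counters plus the time of the last poll, and reports (new_total - old_total) / elapsed_seconds on each update, later injecting interpolated "joining points" between samples so the graph stays smooth. As a rough illustration of the rate part only, here is a minimal, self-contained sketch; the names (NetRateState, poll) and the standalone layout are illustrative assumptions, not the NetworkStorage/get_network_data code these patches actually introduce:

    use std::time::{Duration, Instant};

    struct NetRateState {
        prev_rx_total: u64,
        prev_tx_total: u64,
        last_poll: Instant,
    }

    impl NetRateState {
        // Given the latest cumulative byte counters, return (rx, tx) in bytes per
        // second, mirroring `(net_rx - *prev_net_rx) as f64 / elapsed_time` below.
        fn poll(&mut self, rx_total: u64, tx_total: u64, now: Instant) -> (u64, u64) {
            let elapsed = now.duration_since(self.last_poll).as_secs_f64();
            let rate = |new: u64, old: u64| {
                if elapsed > 0.0 {
                    (new.saturating_sub(old) as f64 / elapsed) as u64
                } else {
                    0
                }
            };
            let rates = (
                rate(rx_total, self.prev_rx_total),
                rate(tx_total, self.prev_tx_total),
            );
            self.prev_rx_total = rx_total;
            self.prev_tx_total = tx_total;
            self.last_poll = now;
            rates
        }
    }

    fn main() {
        let start = Instant::now();
        let mut state = NetRateState {
            prev_rx_total: 0,
            prev_tx_total: 0,
            last_poll: start,
        };
        // Pretend a poll one second later saw 1 MiB received and 256 KiB sent in total.
        let (rx, tx) = state.poll(1_048_576, 262_144, start + Duration::from_secs(1));
        println!("{} B/s down, {} B/s up", rx, tx);
    }

The saturating_sub guard is an extra safety choice in this sketch; the actual patch assumes the counters only grow between polls.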
---
 src/app/data_collection.rs         |  89 ++++++++++++++++++++++-----------
 src/app/data_collection/network.rs |  86 ++++++++++++++++++++-----------
 src/data_conversion.rs             | 100 ++++++++++++++++---------------------
 src/main.rs                        |   2 +-
 4 files changed, 159 insertions(+), 118 deletions(-)

(limited to 'src')

diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs
index 54a76bb4..5042c6d6 100644
--- a/src/app/data_collection.rs
+++ b/src/app/data_collection.rs
@@ -23,7 +23,7 @@ fn push_if_valid(result: &Result, vector_to_push: &mut
 	}
 }
 
-#[derive(Debug, Default, Clone)]
+#[derive(Clone, Debug, Default)]
 pub struct Data {
 	pub list_of_cpu_packages: Vec<cpu::CPUPackage>,
 	pub list_of_io: Vec<disks::IOPackage>,
@@ -31,10 +31,10 @@ pub struct Data {
 	pub memory: Vec<mem::MemData>,
 	pub swap: Vec<mem::MemData>,
 	pub list_of_temperature_sensor: Vec<temperature::TempData>,
-	pub network: Vec<network::NetworkData>,
+	pub network: network::NetworkStorage,
 	pub list_of_processes: Vec<processes::ProcessData>,
 	pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>,
-	pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
+	pub list_of_disks: Vec<disks::DiskData>,
 }
 
 pub struct DataState {
@@ -45,9 +45,6 @@ pub struct DataState {
 	prev_pid_stats: HashMap<String, (f64, Instant)>,
 	prev_idle: f64,
 	prev_non_idle: f64,
-	prev_net_rx_bytes: u64,
-	prev_net_tx_bytes: u64,
-	prev_net_access_time: Instant,
 	temperature_type: temperature::TemperatureType,
 	last_clean: Instant, // Last time stale data was cleared
 	use_current_cpu_total: bool,
@@ -63,9 +60,6 @@ impl Default for DataState {
 			prev_pid_stats: HashMap::new(),
 			prev_idle: 0_f64,
 			prev_non_idle: 0_f64,
-			prev_net_rx_bytes: 0,
-			prev_net_tx_bytes: 0,
-			prev_net_access_time: Instant::now(),
 			temperature_type: temperature::TemperatureType::Celsius,
 			last_clean: Instant::now(),
 			use_current_cpu_total: false,
@@ -97,18 +91,61 @@ impl DataState {
 		let current_instant = std::time::Instant::now();
 
+		// Network
+		let new_network_data = network::get_network_data(
+			&self.sys,
+			&self.data.network.last_collection_time,
+			&mut self.data.network.total_rx,
+			&mut self.data.network.total_tx,
+			&current_instant,
+		)
+		.await;
+
+		let joining_points: Option<Vec<network::NetworkJoinPoint>> =
+			if !self.data.network.data_points.is_empty() {
+				if let Some(prev_data) = self
+					.data
+					.network
+					.data_points
+					.get(&self.data.network.last_collection_time)
+				{
+					// If not empty, inject joining points
+
+					let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64;
+					let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64;
+					let time_gap = current_instant
+						.duration_since(self.data.network.last_collection_time)
+						.as_millis() as f64;
+
+					let mut new_joining_points = Vec::new();
+
+					for idx in 0..100 {
+						new_joining_points.push(network::NetworkJoinPoint {
+							rx: prev_data.0.rx as f64 + rx_diff / 100.0 * idx as f64,
+							tx: prev_data.0.tx as f64 + tx_diff / 100.0 * idx as f64,
+							time_offset_milliseconds: time_gap / 100.0 * (100 - idx) as f64,
+						});
+					}
+					Some(new_joining_points)
+				} else {
+					None
+				}
+			} else {
+				None
+			};
+
+		// Set values
+		self.data.network.rx = new_network_data.rx;
+		self.data.network.tx = new_network_data.tx;
+		self.data.network.last_collection_time = current_instant;
+
+		// Add new point
+		self.data
+			.network
+			.data_points
+			.insert(current_instant, (new_network_data, joining_points));
+
 		// What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
- push_if_valid( - &network::get_network_data( - &self.sys, - &mut self.prev_net_rx_bytes, - &mut self.prev_net_tx_bytes, - &mut self.prev_net_access_time, - ¤t_instant, - ) - .await, - &mut self.data.network, - ); push_if_valid( &cpu::get_cpu_data_list(&self.sys, ¤t_instant), &mut self.data.list_of_cpu_packages, @@ -167,6 +204,8 @@ impl DataState { self.prev_pid_stats.remove(&stale); } + // TODO: [OPT] cleaning stale network + self.data.list_of_cpu_packages = self .data .list_of_cpu_packages @@ -197,16 +236,6 @@ impl DataState { }) .collect::>(); - self.data.network = self - .data - .network - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - self.data.list_of_io = self .data .list_of_io diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index 1ba7c90f..c9b97de6 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -1,34 +1,58 @@ use futures::StreamExt; use heim::net; use heim::units::information::byte; +use std::collections::BTreeMap; use std::time::Instant; use sysinfo::{NetworkExt, System, SystemExt}; -#[derive(Debug, Clone)] -/// Note all values are in bytes... -pub struct NetworkData { +#[derive(Clone, Debug)] +pub struct NetworkJoinPoint { + pub rx: f64, + pub tx: f64, + pub time_offset_milliseconds: f64, +} + +#[derive(Clone, Debug)] +pub struct NetworkStorage { + pub data_points: BTreeMap>)>, pub rx: u64, pub tx: u64, pub total_rx: u64, pub total_tx: u64, - pub instant: Instant, + pub last_collection_time: Instant, +} + +impl Default for NetworkStorage { + fn default() -> Self { + NetworkStorage { + data_points: BTreeMap::default(), + rx: 0, + tx: 0, + total_rx: 0, + total_tx: 0, + last_collection_time: Instant::now(), + } + } +} + +#[derive(Clone, Debug)] +/// Note all values are in bytes... +pub struct NetworkData { + pub rx: u64, + pub tx: u64, } pub async fn get_network_data( - sys: &System, prev_net_rx_bytes: &mut u64, prev_net_tx_bytes: &mut u64, - prev_net_access_time: &mut Instant, curr_time: &Instant, -) -> crate::utils::error::Result { + sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, + curr_time: &Instant, +) -> NetworkData { + // FIXME: [WIN] Track current total bytes... also is this accurate? 
if cfg!(target_os = "windows") { let network_data = sys.get_network(); - - *prev_net_access_time = *curr_time; - Ok(NetworkData { + NetworkData { rx: network_data.get_income(), tx: network_data.get_outcome(), - total_rx: 0, - total_tx: 0, - instant: *prev_net_access_time, - }) + } } else { let mut io_data = net::io_counters(); let mut net_rx: u64 = 0; @@ -40,21 +64,23 @@ pub async fn get_network_data( net_tx += io.bytes_sent().get::(); } } - let cur_time = Instant::now(); - let elapsed_time = cur_time.duration_since(*prev_net_access_time).as_secs_f64(); - - let rx = ((net_rx - *prev_net_rx_bytes) as f64 / elapsed_time) as u64; - let tx = ((net_tx - *prev_net_tx_bytes) as f64 / elapsed_time) as u64; - - *prev_net_rx_bytes = net_rx; - *prev_net_tx_bytes = net_tx; - *prev_net_access_time = cur_time; - Ok(NetworkData { - rx, - tx, - total_rx: *prev_net_rx_bytes, - total_tx: *prev_net_tx_bytes, - instant: *prev_net_access_time, - }) + let elapsed_time = curr_time + .duration_since(*prev_net_access_time) + .as_secs_f64(); + + if *prev_net_rx == 0 { + *prev_net_rx = net_rx; + } + + if *prev_net_tx == 0 { + *prev_net_tx = net_tx; + } + + let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; + let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; + + *prev_net_rx = net_rx; + *prev_net_tx = net_tx; + NetworkData { rx, tx } } } diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 5def8bf0..58322d97 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -399,64 +399,60 @@ pub fn update_network_data_points(app_data: &data_collection::Data) -> Converted } pub fn convert_network_data_points( - network_data: &[data_collection::network::NetworkData], + network_data: &data_collection::network::NetworkStorage, ) -> ConvertedNetworkData { let mut rx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new(); - for data in network_data { - let current_time = std::time::Instant::now(); + let current_time = network_data.last_collection_time; + for (time, data) in &network_data.data_points { + let time_from_start: f64 = ((TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + * 10_f64) + .floor(); + + // Insert in joiner points + if let Some(joiners) = &data.1 { + for joiner in joiners { + let offset_time = time_from_start - joiner.time_offset_milliseconds as f64 * 10_f64; + rx.push(( + offset_time, + if joiner.rx > 0.0 { + (joiner.rx).log(2.0) + } else { + 0.0 + }, + )); + + tx.push(( + offset_time, + if joiner.tx > 0.0 { + (joiner.tx).log(2.0) + } else { + 0.0 + }, + )); + } + } + + // Insert in main points let rx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.rx > 0 { - (data.rx as f64).log(2.0) + time_from_start, + if data.0.rx > 0 { + (data.0.rx as f64).log(2.0) } else { 0.0 }, ); let tx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.tx > 0 { - (data.tx as f64).log(2.0) + time_from_start, + if data.0.tx > 0 { + (data.0.tx as f64).log(2.0) } else { 0.0 }, ); - //debug!("Plotting: {:?} bytes rx, {:?} bytes tx", rx_data, tx_data); - - // Now, inject our joining points... 
- if !rx.is_empty() { - let previous_element_data = *(rx.last().unwrap()); - for idx in 0..50 { - rx.push(( - previous_element_data.0 - + ((rx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((rx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - - // Now, inject our joining points... - if !tx.is_empty() { - let previous_element_data = *(tx.last().unwrap()); - for idx in 0..50 { - tx.push(( - previous_element_data.0 - + ((tx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((tx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - rx.push(rx_data); tx.push(tx_data); } @@ -466,13 +462,8 @@ pub fn convert_network_data_points( let total_tx_converted_result: (f64, String); let tx_converted_result: (f64, String); - if let Some(last_num_bytes_entry) = network_data.last() { - rx_converted_result = get_exact_byte_values(last_num_bytes_entry.rx, false); - total_rx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_rx, false) - } else { - rx_converted_result = get_exact_byte_values(0, false); - total_rx_converted_result = get_exact_byte_values(0, false); - } + rx_converted_result = get_exact_byte_values(network_data.rx, false); + total_rx_converted_result = get_exact_byte_values(network_data.total_rx, false); let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1); let total_rx_display = if cfg!(not(target_os = "windows")) { format!( @@ -483,13 +474,8 @@ pub fn convert_network_data_points( "N/A".to_string() }; - if let Some(last_num_bytes_entry) = network_data.last() { - tx_converted_result = get_exact_byte_values(last_num_bytes_entry.tx, false); - total_tx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_tx, false); - } else { - tx_converted_result = get_exact_byte_values(0, false); - total_tx_converted_result = get_exact_byte_values(0, false); - } + tx_converted_result = get_exact_byte_values(network_data.tx, false); + total_tx_converted_result = get_exact_byte_values(network_data.total_tx, false); let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1); let total_tx_display = if cfg!(not(target_os = "windows")) { format!( diff --git a/src/main.rs b/src/main.rs index 47b55fb6..043ddcf5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -199,7 +199,7 @@ fn main() -> error::Result<()> { } futures::executor::block_on(data_state.update_data()); tx.send(Event::Update(Box::from(data_state.data.clone()))) - .unwrap(); + .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it if first_run { // Fix for if you set a really long time for update periods (and just gives a faster first value) -- cgit v1.2.3 From 0fdab76cf5df3c874e27ab7d89cb6f16865023c1 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 22:59:42 -0500 Subject: Tweaked network graph generation a bit to match master --- src/app/data_collection.rs | 11 +++++++---- src/canvas.rs | 6 +++--- 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 5042c6d6..8d3c4dbe 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -119,11 +119,14 @@ impl DataState { let mut new_joining_points = Vec::new(); - for idx in 0..100 { + let num_points = 50; + for idx in (0..num_points).rev() { new_joining_points.push(network::NetworkJoinPoint { - rx: prev_data.0.rx as f64 + rx_diff / 100.0 * idx 
as f64, - tx: prev_data.0.tx as f64 + tx_diff / 100.0 * idx as f64, - time_offset_milliseconds: time_gap / 100.0 * (100 - idx) as f64, + rx: prev_data.0.rx as f64 + + rx_diff / num_points as f64 * (num_points - idx) as f64, + tx: prev_data.0.tx as f64 + + tx_diff / num_points as f64 * (num_points - idx) as f64, + time_offset_milliseconds: time_gap / num_points as f64 * idx as f64, }); } Some(new_joining_points) diff --git a/src/canvas.rs b/src/canvas.rs index 4d087b9e..3734573e 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -131,11 +131,11 @@ fn gen_n_colours(num_to_gen: i32) -> Vec { // Generate colours let mut colour_vec: Vec = vec![ - Color::LightCyan, - Color::LightYellow, Color::Red, - Color::Green, + Color::LightYellow, Color::LightMagenta, + Color::LightCyan, + Color::Green, ]; let mut h: f32 = 0.4; // We don't need random colours... right? -- cgit v1.2.3 From e6b6048afb3ddfa89d308742f0be1eb5478ecf5c Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 23:10:32 -0500 Subject: Further tweaking of network --- src/app/data_collection.rs | 19 +++++++++++++++++-- src/app/data_collection/network.rs | 19 ++++++++++++------- 2 files changed, 29 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 8d3c4dbe..0d07e084 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -37,6 +37,22 @@ pub struct Data { pub list_of_disks: Vec, } +impl Data { + pub fn first_run_cleanup(&mut self) { + self.list_of_cpu_packages = Vec::new(); + self.list_of_io = Vec::new(); + self.list_of_physical_io = Vec::new(); + self.memory = Vec::new(); + self.swap = Vec::new(); + self.list_of_temperature_sensor = Vec::new(); + self.list_of_processes = Vec::new(); + self.grouped_list_of_processes = None; + self.list_of_disks = Vec::new(); + + self.network.first_run(); + } +} + pub struct DataState { pub data: Data, first_run: bool, @@ -110,7 +126,6 @@ impl DataState { .get(&self.data.network.last_collection_time) { // If not empty, inject joining points - let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; let time_gap = current_instant @@ -188,7 +203,7 @@ impl DataState { ); if self.first_run { - self.data = Data::default(); + self.data.first_run_cleanup(); self.first_run = false; } diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index c9b97de6..da646ad2 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -35,6 +35,14 @@ impl Default for NetworkStorage { } } +impl NetworkStorage { + pub fn first_run(&mut self) { + self.data_points = BTreeMap::default(); + self.rx = 0; + self.tx = 0; + } +} + #[derive(Clone, Debug)] /// Note all values are in bytes... 
pub struct NetworkData { @@ -68,13 +76,10 @@ pub async fn get_network_data( .duration_since(*prev_net_access_time) .as_secs_f64(); - if *prev_net_rx == 0 { - *prev_net_rx = net_rx; - } - - if *prev_net_tx == 0 { - *prev_net_tx = net_tx; - } + debug!( + "net rx: {}, net tx: {}, net prev rx: {}, net prev tx: {}", + net_rx, net_tx, *prev_net_rx, *prev_net_tx + ); let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; -- cgit v1.2.3 From fe99b99d0af3435636562dc4ca80a944e74ac926 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 23:35:16 -0500 Subject: Removed btreemap and went back to vec as it makes more sense for us --- src/app/data_collection.rs | 10 +++------- src/app/data_collection/network.rs | 13 ++++--------- 2 files changed, 7 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 0d07e084..433f43d6 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -119,13 +119,9 @@ impl DataState { let joining_points: Option> = if !self.data.network.data_points.is_empty() { - if let Some(prev_data) = self - .data - .network - .data_points - .get(&self.data.network.last_collection_time) - { + if let Some(last_entry) = self.data.network.data_points.last() { // If not empty, inject joining points + let prev_data = &last_entry.1; let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; let time_gap = current_instant @@ -161,7 +157,7 @@ impl DataState { self.data .network .data_points - .insert(current_instant, (new_network_data, joining_points)); + .push((current_instant, (new_network_data, joining_points))); // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
push_if_valid( diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index da646ad2..b1695409 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -1,7 +1,6 @@ use futures::StreamExt; use heim::net; use heim::units::information::byte; -use std::collections::BTreeMap; use std::time::Instant; use sysinfo::{NetworkExt, System, SystemExt}; @@ -12,9 +11,10 @@ pub struct NetworkJoinPoint { pub time_offset_milliseconds: f64, } +type NetworkDataGroup = (Instant, (NetworkData, Option>)); #[derive(Clone, Debug)] pub struct NetworkStorage { - pub data_points: BTreeMap>)>, + pub data_points: Vec, pub rx: u64, pub tx: u64, pub total_rx: u64, @@ -25,7 +25,7 @@ pub struct NetworkStorage { impl Default for NetworkStorage { fn default() -> Self { NetworkStorage { - data_points: BTreeMap::default(), + data_points: Vec::default(), rx: 0, tx: 0, total_rx: 0, @@ -37,7 +37,7 @@ impl Default for NetworkStorage { impl NetworkStorage { pub fn first_run(&mut self) { - self.data_points = BTreeMap::default(); + self.data_points = Vec::default(); self.rx = 0; self.tx = 0; } @@ -76,11 +76,6 @@ pub async fn get_network_data( .duration_since(*prev_net_access_time) .as_secs_f64(); - debug!( - "net rx: {}, net tx: {}, net prev rx: {}, net prev tx: {}", - net_rx, net_tx, *prev_net_rx, *prev_net_tx - ); - let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; -- cgit v1.2.3 From 13f6dfc529bf6702201c67aabec1d637f368e8c5 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sat, 25 Jan 2020 16:36:14 -0500 Subject: Reworked network again; will use this to change all widgets --- src/app.rs | 32 +++- src/app/data_collection.rs | 266 -------------------------- src/app/data_collection/cpu.rs | 33 ---- src/app/data_collection/disks.rs | 98 ---------- src/app/data_collection/mem.rs | 30 --- src/app/data_collection/network.rs | 86 --------- src/app/data_collection/processes.rs | 329 --------------------------------- src/app/data_collection/temperature.rs | 87 --------- src/app/data_harvester.rs | 233 +++++++++++++++++++++++ src/app/data_harvester/cpu.rs | 33 ++++ src/app/data_harvester/disks.rs | 98 ++++++++++ src/app/data_harvester/mem.rs | 30 +++ src/app/data_harvester/network.rs | 55 ++++++ src/app/data_harvester/processes.rs | 329 +++++++++++++++++++++++++++++++++ src/app/data_harvester/temperature.rs | 87 +++++++++ src/app/data_janitor.rs | 120 ++++++++++++ src/canvas.rs | 5 +- src/data_conversion.rs | 120 +++++------- src/main.rs | 43 +++-- 19 files changed, 1087 insertions(+), 1027 deletions(-) delete mode 100644 src/app/data_collection.rs delete mode 100644 src/app/data_collection/cpu.rs delete mode 100644 src/app/data_collection/disks.rs delete mode 100644 src/app/data_collection/mem.rs delete mode 100644 src/app/data_collection/network.rs delete mode 100644 src/app/data_collection/processes.rs delete mode 100644 src/app/data_collection/temperature.rs create mode 100644 src/app/data_harvester.rs create mode 100644 src/app/data_harvester/cpu.rs create mode 100644 src/app/data_harvester/disks.rs create mode 100644 src/app/data_harvester/mem.rs create mode 100644 src/app/data_harvester/network.rs create mode 100644 src/app/data_harvester/processes.rs create mode 100644 src/app/data_harvester/temperature.rs create mode 100644 src/app/data_janitor.rs (limited to 'src') diff --git a/src/app.rs b/src/app.rs index 244f3872..c6c13691 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,7 +1,10 @@ -pub 
mod data_collection; -use data_collection::{processes, temperature}; +pub mod data_harvester; +use data_harvester::{processes, temperature}; use std::time::Instant; +pub mod data_janitor; +use data_janitor::*; + use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; mod process_killer; @@ -30,6 +33,23 @@ lazy_static! { regex::Regex::new(".*"); } +/// AppConfigFields is meant to cover basic fields that would normally be set +/// by config files or launch options. Don't need to be mutable (set and forget). +pub struct AppConfigFields { + pub update_rate_in_milliseconds: u64, + pub temperature_type: temperature::TemperatureType, + pub use_dot: bool, +} + +/// AppScrollWidgetState deals with fields for a scrollable app's current state. +pub struct AppScrollWidgetState { + pub widget_scroll_position: i64, +} + +/// AppSearchState only deals with the search's state. +pub struct AppSearchState {} + +// TODO: [OPT] Group like fields together... this is kinda gross to step through pub struct App { // Sorting pub process_sorting_type: processes::ProcessSorting, @@ -49,7 +69,7 @@ pub struct App { pub update_rate_in_milliseconds: u64, pub show_average_cpu: bool, pub current_application_position: ApplicationPosition, - pub data: data_collection::Data, + pub data: data_harvester::Data, awaiting_second_char: bool, second_char: char, pub use_dot: bool, @@ -63,12 +83,13 @@ pub struct App { last_key_press: Instant, pub canvas_data: canvas::CanvasData, enable_grouping: bool, - enable_searching: bool, // TODO: [OPT] group together? + enable_searching: bool, current_search_query: String, searching_pid: bool, pub use_simple: bool, current_regex: std::result::Result, current_cursor_position: usize, + pub data_collection: DataCollection, } impl App { @@ -94,7 +115,7 @@ impl App { previous_disk_position: 0, previous_temp_position: 0, previous_cpu_table_position: 0, - data: data_collection::Data::default(), + data: data_harvester::Data::default(), awaiting_second_char: false, second_char: ' ', use_dot, @@ -114,6 +135,7 @@ impl App { use_simple: false, current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning current_cursor_position: 0, + data_collection: DataCollection::default(), } } diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs deleted file mode 100644 index 433f43d6..00000000 --- a/src/app/data_collection.rs +++ /dev/null @@ -1,266 +0,0 @@ -//! This is the main file to house data collection functions. 
- -use crate::{constants, utils::error::Result}; -use std::{collections::HashMap, time::Instant}; -use sysinfo::{System, SystemExt}; - -pub mod cpu; -pub mod disks; -pub mod mem; -pub mod network; -pub mod processes; -pub mod temperature; - -fn set_if_valid(result: &Result, value_to_set: &mut T) { - if let Ok(result) = result { - *value_to_set = (*result).clone(); - } -} - -fn push_if_valid(result: &Result, vector_to_push: &mut Vec) { - if let Ok(result) = result { - vector_to_push.push(result.clone()); - } -} - -#[derive(Clone, Debug, Default)] -pub struct Data { - pub list_of_cpu_packages: Vec, - pub list_of_io: Vec, - pub list_of_physical_io: Vec, - pub memory: Vec, - pub swap: Vec, - pub list_of_temperature_sensor: Vec, - pub network: network::NetworkStorage, - pub list_of_processes: Vec, - pub grouped_list_of_processes: Option>, - pub list_of_disks: Vec, -} - -impl Data { - pub fn first_run_cleanup(&mut self) { - self.list_of_cpu_packages = Vec::new(); - self.list_of_io = Vec::new(); - self.list_of_physical_io = Vec::new(); - self.memory = Vec::new(); - self.swap = Vec::new(); - self.list_of_temperature_sensor = Vec::new(); - self.list_of_processes = Vec::new(); - self.grouped_list_of_processes = None; - self.list_of_disks = Vec::new(); - - self.network.first_run(); - } -} - -pub struct DataState { - pub data: Data, - first_run: bool, - sys: System, - stale_max_seconds: u64, - prev_pid_stats: HashMap, - prev_idle: f64, - prev_non_idle: f64, - temperature_type: temperature::TemperatureType, - last_clean: Instant, // Last time stale data was cleared - use_current_cpu_total: bool, -} - -impl Default for DataState { - fn default() -> Self { - DataState { - data: Data::default(), - first_run: true, - sys: System::new(), - stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000, - prev_pid_stats: HashMap::new(), - prev_idle: 0_f64, - prev_non_idle: 0_f64, - temperature_type: temperature::TemperatureType::Celsius, - last_clean: Instant::now(), - use_current_cpu_total: false, - } - } -} - -impl DataState { - pub fn set_temperature_type(&mut self, temperature_type: temperature::TemperatureType) { - self.temperature_type = temperature_type; - } - - pub fn set_use_current_cpu_total(&mut self, use_current_cpu_total: bool) { - self.use_current_cpu_total = use_current_cpu_total; - } - - pub fn init(&mut self) { - self.sys.refresh_all(); - } - - pub async fn update_data(&mut self) { - self.sys.refresh_system(); - - if !cfg!(target_os = "linux") { - // For now, might be just windows tbh - self.sys.refresh_processes(); - self.sys.refresh_network(); - } - - let current_instant = std::time::Instant::now(); - - // Network - let new_network_data = network::get_network_data( - &self.sys, - &self.data.network.last_collection_time, - &mut self.data.network.total_rx, - &mut self.data.network.total_tx, - ¤t_instant, - ) - .await; - - let joining_points: Option> = - if !self.data.network.data_points.is_empty() { - if let Some(last_entry) = self.data.network.data_points.last() { - // If not empty, inject joining points - let prev_data = &last_entry.1; - let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; - let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; - let time_gap = current_instant - .duration_since(self.data.network.last_collection_time) - .as_millis() as f64; - - let mut new_joining_points = Vec::new(); - - let num_points = 50; - for idx in (0..num_points).rev() { - new_joining_points.push(network::NetworkJoinPoint { - rx: prev_data.0.rx as f64 - + rx_diff / num_points 
as f64 * (num_points - idx) as f64, - tx: prev_data.0.tx as f64 - + tx_diff / num_points as f64 * (num_points - idx) as f64, - time_offset_milliseconds: time_gap / num_points as f64 * idx as f64, - }); - } - Some(new_joining_points) - } else { - None - } - } else { - None - }; - - // Set values - self.data.network.rx = new_network_data.rx; - self.data.network.tx = new_network_data.tx; - self.data.network.last_collection_time = current_instant; - - // Add new point - self.data - .network - .data_points - .push((current_instant, (new_network_data, joining_points))); - - // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! - push_if_valid( - &cpu::get_cpu_data_list(&self.sys, ¤t_instant), - &mut self.data.list_of_cpu_packages, - ); - - push_if_valid( - &mem::get_mem_data_list(¤t_instant).await, - &mut self.data.memory, - ); - push_if_valid( - &mem::get_swap_data_list(¤t_instant).await, - &mut self.data.swap, - ); - set_if_valid( - &processes::get_sorted_processes_list( - &self.sys, - &mut self.prev_idle, - &mut self.prev_non_idle, - &mut self.prev_pid_stats, - self.use_current_cpu_total, - ¤t_instant, - ), - &mut self.data.list_of_processes, - ); - - set_if_valid( - &disks::get_disk_usage_list().await, - &mut self.data.list_of_disks, - ); - push_if_valid( - &disks::get_io_usage_list(false).await, - &mut self.data.list_of_io, - ); - set_if_valid( - &temperature::get_temperature_data(&self.sys, &self.temperature_type).await, - &mut self.data.list_of_temperature_sensor, - ); - - if self.first_run { - self.data.first_run_cleanup(); - self.first_run = false; - } - - // Filter out stale timed entries - let clean_instant = Instant::now(); - if clean_instant.duration_since(self.last_clean).as_secs() > self.stale_max_seconds { - let stale_list: Vec<_> = self - .prev_pid_stats - .iter() - .filter(|&(_, &v)| { - clean_instant.duration_since(v.1).as_secs() > self.stale_max_seconds - }) - .map(|(k, _)| k.clone()) - .collect(); - for stale in stale_list { - self.prev_pid_stats.remove(&stale); - } - - // TODO: [OPT] cleaning stale network - - self.data.list_of_cpu_packages = self - .data - .list_of_cpu_packages - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - - self.data.memory = self - .data - .memory - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - - self.data.swap = self - .data - .swap - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - - self.data.list_of_io = self - .data - .list_of_io - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - - self.last_clean = clean_instant; - } - } -} diff --git a/src/app/data_collection/cpu.rs b/src/app/data_collection/cpu.rs deleted file mode 100644 index 4987a6a3..00000000 --- a/src/app/data_collection/cpu.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::time::Instant; -use sysinfo::{ProcessorExt, System, SystemExt}; - -#[derive(Debug, Clone)] -pub struct CPUData { - pub cpu_name: Box, - pub cpu_usage: f64, -} - -#[derive(Debug, Clone)] -pub struct CPUPackage { - pub cpu_vec: Vec, - pub instant: Instant, -} - -pub fn get_cpu_data_list( - sys: &System, curr_time: &Instant, -) -> crate::utils::error::Result { - let cpu_data = 
sys.get_processor_list(); - let mut cpu_vec = Vec::new(); - - for cpu in cpu_data { - cpu_vec.push(CPUData { - cpu_name: Box::from(cpu.get_name()), - cpu_usage: f64::from(cpu.get_cpu_usage()) * 100_f64, - }); - } - - Ok(CPUPackage { - cpu_vec, - instant: *curr_time, - }) -} diff --git a/src/app/data_collection/disks.rs b/src/app/data_collection/disks.rs deleted file mode 100644 index bf564ab2..00000000 --- a/src/app/data_collection/disks.rs +++ /dev/null @@ -1,98 +0,0 @@ -use futures::stream::StreamExt; -use heim::units::information; -use std::time::Instant; - -#[derive(Debug, Clone, Default)] -pub struct DiskData { - pub name: Box, - pub mount_point: Box, - pub free_space: u64, - pub used_space: u64, - pub total_space: u64, -} - -#[derive(Clone, Debug)] -pub struct IOData { - pub mount_point: Box, - pub read_bytes: u64, - pub write_bytes: u64, -} - -#[derive(Debug, Clone)] -pub struct IOPackage { - pub io_hash: std::collections::HashMap, - pub instant: Instant, -} - -pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result { - let mut io_hash: std::collections::HashMap = std::collections::HashMap::new(); - if get_physical { - let mut physical_counter_stream = heim::disk::io_counters_physical(); - while let Some(io) = physical_counter_stream.next().await { - let io = io?; - let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable"); - io_hash.insert( - mount_point.to_string(), - IOData { - mount_point: Box::from(mount_point), - read_bytes: io.read_bytes().get::(), - write_bytes: io.write_bytes().get::(), - }, - ); - } - } else { - let mut counter_stream = heim::disk::io_counters(); - while let Some(io) = counter_stream.next().await { - let io = io?; - let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable"); - io_hash.insert( - mount_point.to_string(), - IOData { - mount_point: Box::from(mount_point), - read_bytes: io.read_bytes().get::(), - write_bytes: io.write_bytes().get::(), - }, - ); - } - } - - Ok(IOPackage { - io_hash, - instant: Instant::now(), - }) -} - -pub async fn get_disk_usage_list() -> crate::utils::error::Result> { - let mut vec_disks: Vec = Vec::new(); - let mut partitions_stream = heim::disk::partitions_physical(); - - while let Some(part) = partitions_stream.next().await { - if let Ok(part) = part { - let partition = part; - let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?; - - vec_disks.push(DiskData { - free_space: usage.free().get::(), - used_space: usage.used().get::(), - total_space: usage.total().get::(), - mount_point: Box::from( - partition - .mount_point() - .to_str() - .unwrap_or("Name Unavailable"), - ), - name: Box::from( - partition - .device() - .unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")) - .to_str() - .unwrap_or("Name Unavailable"), - ), - }); - } - } - - vec_disks.sort_by(|a, b| a.name.cmp(&b.name)); - - Ok(vec_disks) -} diff --git a/src/app/data_collection/mem.rs b/src/app/data_collection/mem.rs deleted file mode 100644 index 15d9c41f..00000000 --- a/src/app/data_collection/mem.rs +++ /dev/null @@ -1,30 +0,0 @@ -use heim::units::information; -use std::time::Instant; - -#[derive(Debug, Clone)] -pub struct MemData { - pub mem_total_in_mb: u64, - pub mem_used_in_mb: u64, - pub instant: Instant, -} - -pub async fn get_mem_data_list(curr_time: &Instant) -> crate::utils::error::Result { - let memory = heim::memory::memory().await?; - - Ok(MemData { - mem_total_in_mb: memory.total().get::(), - mem_used_in_mb: memory.total().get::() - - 
memory.available().get::(), - instant: *curr_time, - }) -} - -pub async fn get_swap_data_list(curr_time: &Instant) -> crate::utils::error::Result { - let memory = heim::memory::swap().await?; - - Ok(MemData { - mem_total_in_mb: memory.total().get::(), - mem_used_in_mb: memory.used().get::(), - instant: *curr_time, - }) -} diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs deleted file mode 100644 index b1695409..00000000 --- a/src/app/data_collection/network.rs +++ /dev/null @@ -1,86 +0,0 @@ -use futures::StreamExt; -use heim::net; -use heim::units::information::byte; -use std::time::Instant; -use sysinfo::{NetworkExt, System, SystemExt}; - -#[derive(Clone, Debug)] -pub struct NetworkJoinPoint { - pub rx: f64, - pub tx: f64, - pub time_offset_milliseconds: f64, -} - -type NetworkDataGroup = (Instant, (NetworkData, Option>)); -#[derive(Clone, Debug)] -pub struct NetworkStorage { - pub data_points: Vec, - pub rx: u64, - pub tx: u64, - pub total_rx: u64, - pub total_tx: u64, - pub last_collection_time: Instant, -} - -impl Default for NetworkStorage { - fn default() -> Self { - NetworkStorage { - data_points: Vec::default(), - rx: 0, - tx: 0, - total_rx: 0, - total_tx: 0, - last_collection_time: Instant::now(), - } - } -} - -impl NetworkStorage { - pub fn first_run(&mut self) { - self.data_points = Vec::default(); - self.rx = 0; - self.tx = 0; - } -} - -#[derive(Clone, Debug)] -/// Note all values are in bytes... -pub struct NetworkData { - pub rx: u64, - pub tx: u64, -} - -pub async fn get_network_data( - sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, - curr_time: &Instant, -) -> NetworkData { - // FIXME: [WIN] Track current total bytes... also is this accurate? - if cfg!(target_os = "windows") { - let network_data = sys.get_network(); - NetworkData { - rx: network_data.get_income(), - tx: network_data.get_outcome(), - } - } else { - let mut io_data = net::io_counters(); - let mut net_rx: u64 = 0; - let mut net_tx: u64 = 0; - - while let Some(io) = io_data.next().await { - if let Ok(io) = io { - net_rx += io.bytes_recv().get::(); - net_tx += io.bytes_sent().get::(); - } - } - let elapsed_time = curr_time - .duration_since(*prev_net_access_time) - .as_secs_f64(); - - let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; - let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; - - *prev_net_rx = net_rx; - *prev_net_tx = net_tx; - NetworkData { rx, tx } - } -} diff --git a/src/app/data_collection/processes.rs b/src/app/data_collection/processes.rs deleted file mode 100644 index 069c1b29..00000000 --- a/src/app/data_collection/processes.rs +++ /dev/null @@ -1,329 +0,0 @@ -use crate::utils::error; -use std::cmp::Ordering; -use std::{collections::HashMap, process::Command, time::Instant}; -use sysinfo::{ProcessExt, System, SystemExt}; - -#[derive(Clone)] -pub enum ProcessSorting { - CPU, - MEM, - PID, - NAME, -} - -impl Default for ProcessSorting { - fn default() -> Self { - ProcessSorting::CPU - } -} - -#[derive(Debug, Clone, Default)] -pub struct ProcessData { - pub pid: u32, - pub cpu_usage_percent: f64, - pub mem_usage_percent: Option, - pub mem_usage_kb: Option, - pub name: String, - pub pid_vec: Option>, -} - -fn cpu_usage_calculation( - prev_idle: &mut f64, prev_non_idle: &mut f64, -) -> error::Result<(f64, f64)> { - // From SO answer: https://stackoverflow.com/a/23376195 - let mut path = std::path::PathBuf::new(); - path.push("/proc"); - path.push("stat"); - - let stat_results = 
std::fs::read_to_string(path)?; - let first_line: &str; - - let split_results = stat_results.split('\n').collect::>(); - if split_results.is_empty() { - return Err(error::BottomError::InvalidIO { - message: format!( - "Unable to properly split the stat results; saw {} values, expected at least 1 value.", - split_results.len() - ), - }); - } else { - first_line = split_results[0]; - } - - let val = first_line.split_whitespace().collect::>(); - - // SC in case that the parsing will fail due to length: - if val.len() <= 10 { - return Err(error::BottomError::InvalidIO { - message: format!( - "CPU parsing will fail due to too short of a return value; saw {} values, expected 10 values.", - val.len() - ), - }); - } - - let user: f64 = val[1].parse::<_>().unwrap_or(0_f64); - let nice: f64 = val[2].parse::<_>().unwrap_or(0_f64); - let system: f64 = val[3].parse::<_>().unwrap_or(0_f64); - let idle: f64 = val[4].parse::<_>().unwrap_or(0_f64); - let iowait: f64 = val[5].parse::<_>().unwrap_or(0_f64); - let irq: f64 = val[6].parse::<_>().unwrap_or(0_f64); - let softirq: f64 = val[7].parse::<_>().unwrap_or(0_f64); - let steal: f64 = val[8].parse::<_>().unwrap_or(0_f64); - let guest: f64 = val[9].parse::<_>().unwrap_or(0_f64); - - let idle = idle + iowait; - let non_idle = user + nice + system + irq + softirq + steal + guest; - - let total = idle + non_idle; - let prev_total = *prev_idle + *prev_non_idle; - - let total_delta: f64 = total - prev_total; - let idle_delta: f64 = idle - *prev_idle; - - //debug!("Vangelis function: CPU PERCENT: {}", (total_delta - idle_delta) / total_delta * 100_f64); - - *prev_idle = idle; - *prev_non_idle = non_idle; - - let result = if total_delta - idle_delta != 0_f64 { - total_delta - idle_delta - } else { - 1_f64 - }; - - let cpu_percentage = if total_delta != 0_f64 { - result / total_delta - } else { - 0_f64 - }; - - Ok((result, cpu_percentage)) -} - -fn get_ordering( - a_val: T, b_val: T, reverse_order: bool, -) -> std::cmp::Ordering { - match a_val.partial_cmp(&b_val) { - Some(x) => match x { - Ordering::Greater => { - if reverse_order { - std::cmp::Ordering::Less - } else { - std::cmp::Ordering::Greater - } - } - Ordering::Less => { - if reverse_order { - std::cmp::Ordering::Greater - } else { - std::cmp::Ordering::Less - } - } - Ordering::Equal => Ordering::Equal, - }, - None => Ordering::Equal, - } -} - -fn get_process_cpu_stats(pid: u32) -> std::io::Result { - let mut path = std::path::PathBuf::new(); - path.push("/proc"); - path.push(&pid.to_string()); - path.push("stat"); - - let stat_results = std::fs::read_to_string(path)?; - let val = stat_results.split_whitespace().collect::>(); - let utime = val[13].parse::().unwrap_or(0_f64); - let stime = val[14].parse::().unwrap_or(0_f64); - - //debug!("PID: {}, utime: {}, stime: {}", pid, utime, stime); - - Ok(utime + stime) // This seems to match top... -} - -/// Note that cpu_percentage should be represented WITHOUT the \times 100 factor! 
-fn linux_cpu_usage( - pid: u32, cpu_usage: f64, cpu_percentage: f64, - previous_pid_stats: &mut HashMap, use_current_cpu_total: bool, - curr_time: &Instant, -) -> std::io::Result { - // Based heavily on https://stackoverflow.com/a/23376195 and https://stackoverflow.com/a/1424556 - let before_proc_val: f64 = if previous_pid_stats.contains_key(&pid.to_string()) { - previous_pid_stats - .get(&pid.to_string()) - .unwrap_or(&(0_f64, *curr_time)) - .0 - } else { - 0_f64 - }; - let after_proc_val = get_process_cpu_stats(pid)?; - - /*debug!( - "PID - {} - Before: {}, After: {}, CPU: {}, Percentage: {}", - pid, - before_proc_val, - after_proc_val, - cpu_usage, - (after_proc_val - before_proc_val) / cpu_usage * 100_f64 - );*/ - - let entry = previous_pid_stats - .entry(pid.to_string()) - .or_insert((after_proc_val, *curr_time)); - *entry = (after_proc_val, *curr_time); - if use_current_cpu_total { - Ok((after_proc_val - before_proc_val) / cpu_usage * 100_f64) - } else { - Ok((after_proc_val - before_proc_val) / cpu_usage * 100_f64 * cpu_percentage) - } -} - -fn convert_ps( - process: &str, cpu_usage: f64, cpu_percentage: f64, - prev_pid_stats: &mut HashMap, use_current_cpu_total: bool, - curr_time: &Instant, -) -> std::io::Result { - if process.trim().to_string().is_empty() { - return Ok(ProcessData { - pid: 0, - name: "".to_string(), - mem_usage_percent: None, - mem_usage_kb: None, - cpu_usage_percent: 0_f64, - pid_vec: None, - }); - } - - let pid = (&process[..11]) - .trim() - .to_string() - .parse::() - .unwrap_or(0); - let name = (&process[11..61]).trim().to_string(); - let mem_usage_percent = Some( - (&process[62..]) - .trim() - .to_string() - .parse::() - .unwrap_or(0_f64), - ); - - Ok(ProcessData { - pid, - name, - mem_usage_percent, - mem_usage_kb: None, - cpu_usage_percent: linux_cpu_usage( - pid, - cpu_usage, - cpu_percentage, - prev_pid_stats, - use_current_cpu_total, - curr_time, - )?, - pid_vec: None, - }) -} - -pub fn get_sorted_processes_list( - sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64, - prev_pid_stats: &mut std::collections::HashMap, - use_current_cpu_total: bool, curr_time: &Instant, -) -> crate::utils::error::Result> { - let mut process_vector: Vec = Vec::new(); - - if cfg!(target_os = "linux") { - // Linux specific - this is a massive pain... ugh. 
- - let ps_result = Command::new("ps") - .args(&["-axo", "pid:10,comm:50,%mem:5", "--noheader"]) - .output()?; - let ps_stdout = String::from_utf8_lossy(&ps_result.stdout); - let split_string = ps_stdout.split('\n'); - //debug!("{:?}", split_string); - let cpu_calc = cpu_usage_calculation(prev_idle, prev_non_idle); - if let Ok((cpu_usage, cpu_percentage)) = cpu_calc { - let process_stream = split_string.collect::>(); - - for process in process_stream { - if let Ok(process_object) = convert_ps( - process, - cpu_usage, - cpu_percentage, - prev_pid_stats, - use_current_cpu_total, - curr_time, - ) { - if !process_object.name.is_empty() { - process_vector.push(process_object); - } - } - } - } else { - error!("Unable to properly parse CPU data in Linux."); - error!("Result: {:?}", cpu_calc.err()); - } - } else { - let process_hashmap = sys.get_process_list(); - for process_val in process_hashmap.values() { - let name = if process_val.name().is_empty() { - let process_cmd = process_val.cmd(); - if process_cmd.len() > 1 { - process_cmd[0].clone() - } else { - let process_exe = process_val.exe().file_stem(); - if let Some(exe) = process_exe { - let process_exe_opt = exe.to_str(); - if let Some(exe_name) = process_exe_opt { - exe_name.to_string() - } else { - "".to_string() - } - } else { - "".to_string() - } - } - } else { - process_val.name().to_string() - }; - - process_vector.push(ProcessData { - pid: process_val.pid() as u32, - name, - mem_usage_percent: None, - mem_usage_kb: Some(process_val.memory()), - cpu_usage_percent: f64::from(process_val.cpu_usage()), - pid_vec: None, - }); - } - } - - Ok(process_vector) -} - -pub fn sort_processes( - process_vector: &mut Vec, sorting_method: &ProcessSorting, reverse_order: bool, -) { - // Always sort alphabetically first! 
- process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, false)); - - match sorting_method { - ProcessSorting::CPU => { - process_vector.sort_by(|a, b| { - get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order) - }); - } - ProcessSorting::MEM => { - process_vector.sort_by(|a, b| { - get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order) - }); - } - ProcessSorting::PID => { - process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order)); - } - ProcessSorting::NAME => { - process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, reverse_order)) - } - } -} diff --git a/src/app/data_collection/temperature.rs b/src/app/data_collection/temperature.rs deleted file mode 100644 index 1bee113e..00000000 --- a/src/app/data_collection/temperature.rs +++ /dev/null @@ -1,87 +0,0 @@ -use futures::StreamExt; -use heim::units::thermodynamic_temperature; -use std::cmp::Ordering; -use sysinfo::{ComponentExt, System, SystemExt}; - -#[derive(Debug, Clone)] -pub struct TempData { - pub component_name: Box, - pub temperature: f32, -} - -#[derive(Clone, Debug)] -pub enum TemperatureType { - Celsius, - Kelvin, - Fahrenheit, -} - -impl Default for TemperatureType { - fn default() -> Self { - TemperatureType::Celsius - } -} - -pub async fn get_temperature_data( - sys: &System, temp_type: &TemperatureType, -) -> crate::utils::error::Result> { - let mut temperature_vec: Vec = Vec::new(); - - if cfg!(target_os = "linux") { - let mut sensor_data = heim::sensors::temperatures(); - while let Some(sensor) = sensor_data.next().await { - if let Ok(sensor) = sensor { - temperature_vec.push(TempData { - component_name: Box::from(sensor.unit()), - temperature: match temp_type { - TemperatureType::Celsius => sensor - .current() - .get::( - ), - TemperatureType::Kelvin => { - sensor.current().get::() - } - TemperatureType::Fahrenheit => sensor - .current() - .get::( - ), - }, - }); - } - } - } else { - let sensor_data = sys.get_components_list(); - for component in sensor_data { - temperature_vec.push(TempData { - component_name: Box::from(component.get_label()), - temperature: match temp_type { - TemperatureType::Celsius => component.get_temperature(), - TemperatureType::Kelvin => component.get_temperature() + 273.15, - TemperatureType::Fahrenheit => { - (component.get_temperature() * (9.0 / 5.0)) + 32.0 - } - }, - }); - } - } - - // By default, sort temperature, then by alphabetically! Allow for configuring this... - - // Note we sort in reverse here; we want greater temps to be higher priority. - temperature_vec.sort_by(|a, b| match a.temperature.partial_cmp(&b.temperature) { - Some(x) => match x { - Ordering::Less => Ordering::Greater, - Ordering::Greater => Ordering::Less, - Ordering::Equal => Ordering::Equal, - }, - None => Ordering::Equal, - }); - - temperature_vec.sort_by(|a, b| { - a.component_name - .partial_cmp(&b.component_name) - .unwrap_or(Ordering::Equal) - }); - - Ok(temperature_vec) -} diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs new file mode 100644 index 00000000..c31f4962 --- /dev/null +++ b/src/app/data_harvester.rs @@ -0,0 +1,233 @@ +//! This is the main file to house data collection functions. 
+ +use crate::{constants, utils::error::Result}; +use std::{collections::HashMap, time::Instant}; +use sysinfo::{System, SystemExt}; + +pub mod cpu; +pub mod disks; +pub mod mem; +pub mod network; +pub mod processes; +pub mod temperature; + +fn set_if_valid(result: &Result, value_to_set: &mut T) { + if let Ok(result) = result { + *value_to_set = (*result).clone(); + } +} + +fn push_if_valid(result: &Result, vector_to_push: &mut Vec) { + if let Ok(result) = result { + vector_to_push.push(result.clone()); + } +} + +#[derive(Clone, Debug)] +pub struct Data { + pub list_of_cpu_packages: Vec, + pub list_of_io: Vec, + pub memory: Vec, + pub swap: Vec, + pub list_of_temperature_sensor: Vec, + pub network: network::NetworkHarvest, + pub list_of_processes: Vec, + pub grouped_list_of_processes: Option>, + pub list_of_disks: Vec, + pub last_collection_time: Instant, +} + +impl Default for Data { + fn default() -> Self { + Data { + list_of_cpu_packages: Vec::default(), + list_of_io: Vec::default(), + memory: Vec::default(), + swap: Vec::default(), + list_of_temperature_sensor: Vec::default(), + list_of_processes: Vec::default(), + grouped_list_of_processes: None, + list_of_disks: Vec::default(), + network: network::NetworkHarvest::default(), + last_collection_time: Instant::now(), + } + } +} + +impl Data { + pub fn first_run_cleanup(&mut self) { + self.list_of_cpu_packages = Vec::new(); + self.list_of_io = Vec::new(); + self.memory = Vec::new(); + self.swap = Vec::new(); + self.list_of_temperature_sensor = Vec::new(); + self.list_of_processes = Vec::new(); + self.grouped_list_of_processes = None; + self.list_of_disks = Vec::new(); + } +} + +pub struct DataState { + pub data: Data, + sys: System, + stale_max_seconds: u64, + prev_pid_stats: HashMap, + prev_idle: f64, + prev_non_idle: f64, + temperature_type: temperature::TemperatureType, + last_clean: Instant, // Last time stale data was cleared + use_current_cpu_total: bool, +} + +impl Default for DataState { + fn default() -> Self { + DataState { + data: Data::default(), + sys: System::new(), + stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000, + prev_pid_stats: HashMap::new(), + prev_idle: 0_f64, + prev_non_idle: 0_f64, + temperature_type: temperature::TemperatureType::Celsius, + last_clean: Instant::now(), + use_current_cpu_total: false, + } + } +} + +impl DataState { + pub fn set_temperature_type(&mut self, temperature_type: temperature::TemperatureType) { + self.temperature_type = temperature_type; + } + + pub fn set_use_current_cpu_total(&mut self, use_current_cpu_total: bool) { + self.use_current_cpu_total = use_current_cpu_total; + } + + pub fn init(&mut self) { + self.sys.refresh_all(); + } + + pub async fn update_data(&mut self) { + self.sys.refresh_system(); + + if !cfg!(target_os = "linux") { + // For now, might be just windows tbh + self.sys.refresh_processes(); + self.sys.refresh_network(); + } + + let current_instant = std::time::Instant::now(); + + // Network + self.data.network = network::get_network_data( + &self.sys, + &self.data.last_collection_time, + &mut self.data.network.total_rx, + &mut self.data.network.total_tx, + ¤t_instant, + ) + .await; + + // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
+ push_if_valid( + &cpu::get_cpu_data_list(&self.sys, ¤t_instant), + &mut self.data.list_of_cpu_packages, + ); + + push_if_valid( + &mem::get_mem_data_list(¤t_instant).await, + &mut self.data.memory, + ); + push_if_valid( + &mem::get_swap_data_list(¤t_instant).await, + &mut self.data.swap, + ); + set_if_valid( + &processes::get_sorted_processes_list( + &self.sys, + &mut self.prev_idle, + &mut self.prev_non_idle, + &mut self.prev_pid_stats, + self.use_current_cpu_total, + ¤t_instant, + ), + &mut self.data.list_of_processes, + ); + + set_if_valid( + &disks::get_disk_usage_list().await, + &mut self.data.list_of_disks, + ); + push_if_valid( + &disks::get_io_usage_list(false).await, + &mut self.data.list_of_io, + ); + set_if_valid( + &temperature::get_temperature_data(&self.sys, &self.temperature_type).await, + &mut self.data.list_of_temperature_sensor, + ); + + self.data.last_collection_time = current_instant; + + // Filter out stale timed entries + let clean_instant = Instant::now(); + if clean_instant.duration_since(self.last_clean).as_secs() > self.stale_max_seconds { + let stale_list: Vec<_> = self + .prev_pid_stats + .iter() + .filter(|&(_, &v)| { + clean_instant.duration_since(v.1).as_secs() > self.stale_max_seconds + }) + .map(|(k, _)| k.clone()) + .collect(); + for stale in stale_list { + self.prev_pid_stats.remove(&stale); + } + + // TODO: [OPT] cleaning stale network + + self.data.list_of_cpu_packages = self + .data + .list_of_cpu_packages + .iter() + .cloned() + .filter(|entry| { + clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds + }) + .collect::>(); + + self.data.memory = self + .data + .memory + .iter() + .cloned() + .filter(|entry| { + clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds + }) + .collect::>(); + + self.data.swap = self + .data + .swap + .iter() + .cloned() + .filter(|entry| { + clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds + }) + .collect::>(); + + self.data.list_of_io = self + .data + .list_of_io + .iter() + .cloned() + .filter(|entry| { + clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds + }) + .collect::>(); + + self.last_clean = clean_instant; + } + } +} diff --git a/src/app/data_harvester/cpu.rs b/src/app/data_harvester/cpu.rs new file mode 100644 index 00000000..4987a6a3 --- /dev/null +++ b/src/app/data_harvester/cpu.rs @@ -0,0 +1,33 @@ +use std::time::Instant; +use sysinfo::{ProcessorExt, System, SystemExt}; + +#[derive(Debug, Clone)] +pub struct CPUData { + pub cpu_name: Box, + pub cpu_usage: f64, +} + +#[derive(Debug, Clone)] +pub struct CPUPackage { + pub cpu_vec: Vec, + pub instant: Instant, +} + +pub fn get_cpu_data_list( + sys: &System, curr_time: &Instant, +) -> crate::utils::error::Result { + let cpu_data = sys.get_processor_list(); + let mut cpu_vec = Vec::new(); + + for cpu in cpu_data { + cpu_vec.push(CPUData { + cpu_name: Box::from(cpu.get_name()), + cpu_usage: f64::from(cpu.get_cpu_usage()) * 100_f64, + }); + } + + Ok(CPUPackage { + cpu_vec, + instant: *curr_time, + }) +} diff --git a/src/app/data_harvester/disks.rs b/src/app/data_harvester/disks.rs new file mode 100644 index 00000000..bf564ab2 --- /dev/null +++ b/src/app/data_harvester/disks.rs @@ -0,0 +1,98 @@ +use futures::stream::StreamExt; +use heim::units::information; +use std::time::Instant; + +#[derive(Debug, Clone, Default)] +pub struct DiskData { + pub name: Box, + pub mount_point: Box, + pub free_space: u64, + pub used_space: u64, + pub total_space: u64, +} + 
+#[derive(Clone, Debug)] +pub struct IOData { + pub mount_point: Box, + pub read_bytes: u64, + pub write_bytes: u64, +} + +#[derive(Debug, Clone)] +pub struct IOPackage { + pub io_hash: std::collections::HashMap, + pub instant: Instant, +} + +pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result { + let mut io_hash: std::collections::HashMap = std::collections::HashMap::new(); + if get_physical { + let mut physical_counter_stream = heim::disk::io_counters_physical(); + while let Some(io) = physical_counter_stream.next().await { + let io = io?; + let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable"); + io_hash.insert( + mount_point.to_string(), + IOData { + mount_point: Box::from(mount_point), + read_bytes: io.read_bytes().get::(), + write_bytes: io.write_bytes().get::(), + }, + ); + } + } else { + let mut counter_stream = heim::disk::io_counters(); + while let Some(io) = counter_stream.next().await { + let io = io?; + let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable"); + io_hash.insert( + mount_point.to_string(), + IOData { + mount_point: Box::from(mount_point), + read_bytes: io.read_bytes().get::(), + write_bytes: io.write_bytes().get::(), + }, + ); + } + } + + Ok(IOPackage { + io_hash, + instant: Instant::now(), + }) +} + +pub async fn get_disk_usage_list() -> crate::utils::error::Result> { + let mut vec_disks: Vec = Vec::new(); + let mut partitions_stream = heim::disk::partitions_physical(); + + while let Some(part) = partitions_stream.next().await { + if let Ok(part) = part { + let partition = part; + let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?; + + vec_disks.push(DiskData { + free_space: usage.free().get::(), + used_space: usage.used().get::(), + total_space: usage.total().get::(), + mount_point: Box::from( + partition + .mount_point() + .to_str() + .unwrap_or("Name Unavailable"), + ), + name: Box::from( + partition + .device() + .unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")) + .to_str() + .unwrap_or("Name Unavailable"), + ), + }); + } + } + + vec_disks.sort_by(|a, b| a.name.cmp(&b.name)); + + Ok(vec_disks) +} diff --git a/src/app/data_harvester/mem.rs b/src/app/data_harvester/mem.rs new file mode 100644 index 00000000..15d9c41f --- /dev/null +++ b/src/app/data_harvester/mem.rs @@ -0,0 +1,30 @@ +use heim::units::information; +use std::time::Instant; + +#[derive(Debug, Clone)] +pub struct MemData { + pub mem_total_in_mb: u64, +