author     Clement Tsang <34804052+ClementTsang@users.noreply.github.com>    2020-12-11 19:54:02 -0500
committer  GitHub <noreply@github.com>    2020-12-11 19:54:02 -0500
commit     8c4ad90e6756a006243f04033ae9bd9bc9e2f095 (patch)
tree       1e189aea1cb5f497ee7207c184528e77a82c2c01
parent     86135e466ce437de5fd767b29fad9acae29cef93 (diff)
refactor: Another small optimization pass (#350)
Making some small changes that would hopefully improve performance a bit.

- Remove redundant string generations for CPU data conversion
- Switch to fnv for PID hashmap and hashsets
- Use buffered reading to avoid having to store too many lines as strings
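A minimal sketch of the hashing swap, assuming a small integer `Pid` alias (the alias here is illustrative, not bottom's exact definition). `FnvHashMap`/`FnvHashSet` are type aliases over the standard collections with an FNV hasher, which tends to be faster for small keys such as PIDs; the `fnv` crate itself is added to Cargo.toml in the diff below.

    // Sketch: swap std's SipHash-based HashMap/HashSet for fnv's aliases when
    // the keys are small integers such as PIDs. The API stays the same; only
    // `new()` becomes `default()` because the hasher is not the std default.
    use fnv::{FnvHashMap, FnvHashSet};

    type Pid = i32; // assumption for this sketch; not bottom's exact alias

    fn main() {
        let mut pid_mapping: FnvHashMap<Pid, String> = FnvHashMap::default();
        pid_mapping.insert(1, "init".to_string());

        // Collect the currently-known PIDs so stale entries can be pruned later.
        let mut pids_to_clear: FnvHashSet<Pid> = pid_mapping.keys().cloned().collect();
        pids_to_clear.remove(&1);
        assert!(pids_to_clear.is_empty());
    }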
-rw-r--r--  .gitignore                            |  1
-rw-r--r--  Cargo.lock                            |  7
-rw-r--r--  Cargo.toml                            |  4
-rw-r--r--  src/app/data_harvester.rs             |  6
-rw-r--r--  src/app/data_harvester/processes.rs   | 79
-rw-r--r--  src/bin/main.rs                       |  8
-rw-r--r--  src/data_conversion.rs                | 96
-rw-r--r--  src/lib.rs                            |  6

8 files changed, 122 insertions(+), 85 deletions(-)
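For the buffered-reading change in processes.rs below, here is a standalone sketch of the idea: read only the lines that are actually needed through a BufReader instead of pulling the whole file into one String and splitting it. It assumes the Linux paths /proc/stat and /proc/[pid]/io; the function names are made up for illustration.

    use std::fs::File;
    use std::io::{self, BufRead, BufReader};

    // Only the aggregate "cpu ..." line of /proc/stat is needed.
    fn first_line_of_proc_stat() -> io::Result<String> {
        let mut reader = BufReader::new(File::open("/proc/stat")?);
        let mut first_line = String::new();
        reader.read_line(&mut first_line)?;
        Ok(first_line)
    }

    // read_bytes and write_bytes sit on the 5th and 6th lines of /proc/[pid]/io.
    fn read_write_bytes(pid: u32) -> io::Result<(u64, u64)> {
        let reader = BufReader::new(File::open(format!("/proc/{}/io", pid))?);
        let mut lines = reader.lines().skip(4);
        let mut next_value = || -> u64 {
            match lines.next() {
                Some(Ok(line)) => line
                    .split_whitespace()
                    .last()
                    .and_then(|v| v.parse().ok())
                    .unwrap_or(0),
                _ => 0,
            }
        };
        let read_bytes = next_value();
        let write_bytes = next_value();
        Ok((read_bytes, write_bytes))
    }

    fn main() -> io::Result<()> {
        println!("{}", first_line_of_proc_stat()?);
        println!("{:?}", read_write_bytes(std::process::id())?);
        Ok(())
    }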
diff --git a/.gitignore b/.gitignore
index a0196a55..8b2115c0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@
rust-unmangle
*.svg
*.data
+*.data.old
# IntelliJ
.idea/
diff --git a/Cargo.lock b/Cargo.lock
index 0a59dc1a..1c9ff7ac 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -121,6 +121,7 @@ dependencies = [
"ctrlc",
"dirs-next",
"fern",
+ "fnv",
"futures",
"heim",
"indexmap",
@@ -390,6 +391,12 @@ dependencies = [
]
[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
name = "futures"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.toml b/Cargo.toml
index 2865c5c8..70ff40a7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,9 +19,10 @@ doc = false
[profile.release]
debug = 1
+lto = true
# debug = true
+# lto = false
opt-level = 3
-lto = "fat"
codegen-units = 1
[dependencies]
@@ -33,6 +34,7 @@ crossterm = "0.18.2"
ctrlc = {version = "3.1", features = ["termination"]}
clap = "2.33"
dirs-next = "2.0.0"
+fnv = "1.0.7"
futures = "0.3.8"
indexmap = "1.6.0"
itertools = "0.9.0"
diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs
index be1300f1..d7989c37 100644
--- a/src/app/data_harvester.rs
+++ b/src/app/data_harvester.rs
@@ -3,7 +3,7 @@
use std::time::Instant;
#[cfg(target_os = "linux")]
-use std::collections::HashMap;
+use fnv::FnvHashMap;
use sysinfo::{System, SystemExt};
@@ -73,7 +73,7 @@ pub struct DataCollector {
pub data: Data,
sys: System,
#[cfg(target_os = "linux")]
- pid_mapping: HashMap<crate::Pid, processes::PrevProcDetails>,
+ pid_mapping: FnvHashMap<crate::Pid, processes::PrevProcDetails>,
#[cfg(target_os = "linux")]
prev_idle: f64,
#[cfg(target_os = "linux")]
@@ -99,7 +99,7 @@ impl Default for DataCollector {
data: Data::default(),
sys: System::new_with_specifics(sysinfo::RefreshKind::new()),
#[cfg(target_os = "linux")]
- pid_mapping: HashMap::new(),
+ pid_mapping: FnvHashMap::default(),
#[cfg(target_os = "linux")]
prev_idle: 0_f64,
#[cfg(target_os = "linux")]
diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs
index a2ad6252..94dcf635 100644
--- a/src/app/data_harvester/processes.rs
+++ b/src/app/data_harvester/processes.rs
@@ -6,7 +6,7 @@ use sysinfo::ProcessStatus;
use crate::utils::error::{self, BottomError};
#[cfg(target_os = "linux")]
-use std::collections::{hash_map::RandomState, HashMap};
+use fnv::{FnvHashMap, FnvHashSet};
#[cfg(not(target_os = "linux"))]
use sysinfo::{ProcessExt, ProcessorExt, System, SystemExt};
@@ -88,7 +88,7 @@ pub struct PrevProcDetails {
pub cpu_time: f64,
pub proc_stat_path: PathBuf,
// pub proc_statm_path: PathBuf,
- pub proc_exe_path: PathBuf,
+ // pub proc_exe_path: PathBuf,
pub proc_io_path: PathBuf,
pub proc_cmdline_path: PathBuf,
pub just_read: bool,
@@ -98,7 +98,7 @@ impl PrevProcDetails {
pub fn new(pid: Pid) -> Self {
PrevProcDetails {
proc_io_path: PathBuf::from(format!("/proc/{}/io", pid)),
- proc_exe_path: PathBuf::from(format!("/proc/{}/exe", pid)),
+ // proc_exe_path: PathBuf::from(format!("/proc/{}/exe", pid)),
proc_stat_path: PathBuf::from(format!("/proc/{}/stat", pid)),
// proc_statm_path: PathBuf::from(format!("/proc/{}/statm", pid)),
proc_cmdline_path: PathBuf::from(format!("/proc/{}/cmdline", pid)),
@@ -111,23 +111,17 @@ impl PrevProcDetails {
fn cpu_usage_calculation(
prev_idle: &mut f64, prev_non_idle: &mut f64,
) -> error::Result<(f64, f64)> {
+ use std::io::prelude::*;
+ use std::io::BufReader;
+
// From SO answer: https://stackoverflow.com/a/23376195
let mut path = std::path::PathBuf::new();
path.push("/proc");
path.push("stat");
- let stat_results = std::fs::read_to_string(path)?;
- let first_line: &str;
-
- let split_results = stat_results.split('\n').collect::<Vec<&str>>();
- if split_results.is_empty() {
- return Err(error::BottomError::InvalidIO(format!(
- "Unable to properly split the stat results; saw {} values, expected at least 1 value.",
- split_results.len()
- )));
- } else {
- first_line = split_results[0];
- }
+ let mut reader = BufReader::new(std::fs::File::open(path)?);
+ let mut first_line = String::new();
+ reader.read_line(&mut first_line)?;
let val = first_line.split_whitespace().collect::<Vec<&str>>();
@@ -177,20 +171,6 @@ fn cpu_usage_calculation(
}
#[cfg(target_os = "linux")]
-fn get_process_io(path: &PathBuf) -> std::io::Result<String> {
- Ok(std::fs::read_to_string(path)?)
-}
-
-#[cfg(target_os = "linux")]
-fn get_linux_process_io_usage(stat: &[&str]) -> (u64, u64) {
- // Represents read_bytes and write_bytes
- (
- stat[9].parse::<u64>().unwrap_or(0),
- stat[11].parse::<u64>().unwrap_or(0),
- )
-}
-
-#[cfg(target_os = "linux")]
fn get_linux_process_vsize_rss(stat: &[&str]) -> (u64, u64) {
// Represents vsize and rss (bytes and page numbers respectively)
(
@@ -200,6 +180,7 @@ fn get_linux_process_vsize_rss(stat: &[&str]) -> (u64, u64) {
}
#[cfg(target_os = "linux")]
+/// Preferably use this only on small files.
fn read_path_contents(path: &PathBuf) -> std::io::Result<String> {
Ok(std::fs::read_to_string(path)?)
}
@@ -246,11 +227,14 @@ fn get_linux_cpu_usage(
#[allow(clippy::too_many_arguments)]
#[cfg(target_os = "linux")]
-fn read_proc<S: core::hash::BuildHasher>(
+fn read_proc(
pid: Pid, cpu_usage: f64, cpu_fraction: f64,
- pid_mapping: &mut HashMap<Pid, PrevProcDetails, S>, use_current_cpu_total: bool,
+ pid_mapping: &mut FnvHashMap<Pid, PrevProcDetails>, use_current_cpu_total: bool,
time_difference_in_secs: u64, mem_total_kb: u64, page_file_kb: u64,
) -> error::Result<ProcessHarvest> {
+ use std::io::prelude::*;
+ use std::io::BufReader;
+
let pid_stat = pid_mapping
.entry(pid)
.or_insert_with(|| PrevProcDetails::new(pid));
@@ -321,11 +305,33 @@ fn read_proc<S: core::hash::BuildHasher>(
let mem_usage_bytes = mem_usage_kb * 1024;
// This can fail if permission is denied!
+
let (total_read_bytes, total_write_bytes, read_bytes_per_sec, write_bytes_per_sec) =
- if let Ok(io_results) = get_process_io(&pid_stat.proc_io_path) {
- let io_stats = io_results.split_whitespace().collect::<Vec<&str>>();
+ if let Ok(file) = std::fs::File::open(&pid_stat.proc_io_path) {
+ let reader = BufReader::new(file);
+ let mut lines = reader.lines().skip(4);
+
+ // Represents read_bytes and write_bytes, at the 5th and 6th lines (1-index, not 0-index)
+ let total_read_bytes = if let Some(Ok(read_bytes_line)) = lines.next() {
+ if let Some(read_bytes) = read_bytes_line.split_whitespace().last() {
+ read_bytes.parse::<u64>().unwrap_or(0)
+ } else {
+ 0
+ }
+ } else {
+ 0
+ };
+
+ let total_write_bytes = if let Some(Ok(write_bytes_line)) = lines.next() {
+ if let Some(write_bytes) = write_bytes_line.split_whitespace().last() {
+ write_bytes.parse::<u64>().unwrap_or(0)
+ } else {
+ 0
+ }
+ } else {
+ 0
+ };
- let (total_read_bytes, total_write_bytes) = get_linux_process_io_usage(&io_stats);
let read_bytes_per_sec = if time_difference_in_secs == 0 {
0
} else {
@@ -371,14 +377,13 @@ fn read_proc<S: core::hash::BuildHasher>(
#[cfg(target_os = "linux")]
pub fn get_process_data(
prev_idle: &mut f64, prev_non_idle: &mut f64,
- pid_mapping: &mut HashMap<Pid, PrevProcDetails, RandomState>, use_current_cpu_total: bool,
+ pid_mapping: &mut FnvHashMap<Pid, PrevProcDetails>, use_current_cpu_total: bool,
time_difference_in_secs: u64, mem_total_kb: u64, page_file_kb: u64,
) -> crate::utils::error::Result<Vec<ProcessHarvest>> {
// TODO: [PROC THREADS] Add threads
- use std::collections::HashSet;
if let Ok((cpu_usage, cpu_fraction)) = cpu_usage_calculation(prev_idle, prev_non_idle) {
- let mut pids_to_clear: HashSet<Pid> = pid_mapping.keys().cloned().collect();
+ let mut pids_to_clear: FnvHashSet<Pid> = pid_mapping.keys().cloned().collect();
let process_vector: Vec<ProcessHarvest> = std::fs::read_dir("/proc")?
.filter_map(|dir| {
if let Ok(dir) = dir {
diff --git a/src/bin/main.rs b/src/bin/main.rs
index 8e143a39..320b100a 100644
--- a/src/bin/main.rs
+++ b/src/bin/main.rs
@@ -215,8 +215,12 @@ fn main() -> Result<()> {
if app.used_widgets.use_cpu {
// CPU
- app.canvas_data.cpu_data =
- convert_cpu_data_points(&app.data_collection, false);
+
+ convert_cpu_data_points(
+ &app.data_collection,
+ &mut app.canvas_data.cpu_data,
+ false,
+ );
}
// Processes
diff --git a/src/data_conversion.rs b/src/data_conversion.rs
index 8b71cc36..2508d029 100644
--- a/src/data_conversion.rs
+++ b/src/data_conversion.rs
@@ -178,9 +178,9 @@ pub fn convert_disk_row(
}
pub fn convert_cpu_data_points(
- current_data: &data_farmer::DataCollection, is_frozen: bool,
-) -> Vec<ConvertedCpuData> {
- let mut cpu_data_vector: Vec<ConvertedCpuData> = Vec::new();
+ current_data: &data_farmer::DataCollection, existing_cpu_data: &mut Vec<ConvertedCpuData>,
+ is_frozen: bool,
+) {
let current_time = if is_frozen {
if let Some(frozen_instant) = current_data.frozen_instant {
frozen_instant
@@ -191,39 +191,62 @@ pub fn convert_cpu_data_points(
current_data.current_instant
};
- for (time, data) in &current_data.timed_data_vec {
- let time_from_start: f64 = (current_time.duration_since(*time).as_millis() as f64).floor();
-
- for (itx, cpu) in data.cpu_data.iter().enumerate() {
- // Check if the vector exists yet
- if cpu_data_vector.len() <= itx {
- let new_cpu_data = ConvertedCpuData {
- cpu_name: if let Some(cpu_harvest) = current_data.cpu_harvest.get(itx) {
- if let Some(cpu_count) = cpu_harvest.cpu_count {
- format!("{}{}", cpu_harvest.cpu_prefix, cpu_count)
+ // Initialize cpu_data_vector if the lengths don't match...
+ if let Some((_time, data)) = &current_data.timed_data_vec.last() {
+ if data.cpu_data.len() + 1 != existing_cpu_data.len() {
+ *existing_cpu_data = vec![ConvertedCpuData {
+ cpu_name: "All".to_string(),
+ short_cpu_name: "All".to_string(),
+ cpu_data: vec![],
+ legend_value: String::new(),
+ }];
+
+ existing_cpu_data.extend(
+ data.cpu_data
+ .iter()
+ .enumerate()
+ .map(|(itx, cpu_usage)| ConvertedCpuData {
+ cpu_name: if let Some(cpu_harvest) = current_data.cpu_harvest.get(itx) {
+ if let Some(cpu_count) = cpu_harvest.cpu_count {
+ format!("{}{}", cpu_harvest.cpu_prefix, cpu_count)
+ } else {
+ cpu_harvest.cpu_prefix.to_string()
+ }
} else {
- cpu_harvest.cpu_prefix.to_string()
- }
- } else {
- String::default()
- },
- short_cpu_name: if let Some(cpu_harvest) = current_data.cpu_harvest.get(itx) {
- if let Some(cpu_count) = cpu_harvest.cpu_count {
- cpu_count.to_string()
+ String::default()
+ },
+ short_cpu_name: if let Some(cpu_harvest) = current_data.cpu_harvest.get(itx)
+ {
+ if let Some(cpu_count) = cpu_harvest.cpu_count {
+ cpu_count.to_string()
+ } else {
+ cpu_harvest.cpu_prefix.to_string()
+ }
} else {
- cpu_harvest.cpu_prefix.to_string()
- }
- } else {
- String::default()
- },
- ..ConvertedCpuData::default()
- };
+ String::default()
+ },
+ legend_value: format!("{:.0}%", cpu_usage.round()),
+ cpu_data: vec![],
+ })
+ .collect::<Vec<ConvertedCpuData>>(),
+ );
+ } else {
+ existing_cpu_data
+ .iter_mut()
+ .skip(1)
+ .zip(&data.cpu_data)
+ .for_each(|(cpu, cpu_usage)| {
+ cpu.cpu_data = vec![];
+ cpu.legend_value = format!("{:.0}%", cpu_usage.round());
+ });
+ }
+ }
- cpu_data_vector.push(new_cpu_data);
- }
+ for (time, data) in &current_data.timed_data_vec {
+ let time_from_start: f64 = (current_time.duration_since(*time).as_millis() as f64).floor();
- if let Some(cpu_data) = cpu_data_vector.get_mut(itx) {
- cpu_data.legend_value = format!("{:.0}%", cpu.round());
+ for (itx, cpu) in data.cpu_data.iter().enumerate() {
+ if let Some(cpu_data) = existing_cpu_data.get_mut(itx + 1) {
cpu_data.cpu_data.push((-time_from_start, *cpu));
}
}
@@ -232,15 +255,6 @@ pub fn convert_cpu_data_points(
break;
}
}
-
- let mut extended_vec = vec![ConvertedCpuData {
- cpu_name: "All".to_string(),
- short_cpu_name: "All".to_string(),
- cpu_data: vec![],
- legend_value: String::new(),
- }];
- extended_vec.extend(cpu_data_vector);
- extended_vec
}
pub fn convert_mem_data_points(
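One way to read the convert_cpu_data_points change above: the function now fills a caller-owned Vec instead of building and returning a fresh one every tick, so the per-CPU entries are reused between refreshes. A simplified sketch of that pattern follows, with hypothetical names rather than the project's actual types.

    // Fill a caller-provided buffer in place rather than returning a fresh Vec.
    fn convert_points(samples: &[f64], out: &mut Vec<String>) {
        if out.len() != samples.len() {
            // Shape changed (e.g. first call): rebuild the whole vector.
            *out = samples.iter().map(|s| format!("{:.0}%", s.round())).collect();
        } else {
            // Same shape: overwrite the existing slots.
            for (slot, s) in out.iter_mut().zip(samples) {
                *slot = format!("{:.0}%", s.round());
            }
        }
    }

    fn main() {
        let mut legend: Vec<String> = Vec::new();
        convert_points(&[12.4, 87.6], &mut legend); // first call allocates
        convert_points(&[40.0, 2.1], &mut legend);  // later calls reuse the Vec
        assert_eq!(legend, vec!["40%", "2%"]);
    }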
diff --git a/src/lib.rs b/src/lib.rs
index 85f0d7e9..e245f1ef 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -309,7 +309,11 @@ pub fn handle_force_redraws(app: &mut App) {
}
if app.cpu_state.force_update.is_some() {
- app.canvas_data.cpu_data = convert_cpu_data_points(&app.data_collection, app.is_frozen);
+ convert_cpu_data_points(
+ &app.data_collection,
+ &mut app.canvas_data.cpu_data,
+ app.is_frozen,
+ );
app.cpu_state.force_update = None;
}