-rw-r--r--  Cargo.toml                                             |   2
-rw-r--r--  docs/content/configuration/command-line-flags.md       |   2
-rw-r--r--  docs/content/configuration/config-file/flags.md        |   2
-rw-r--r--  docs/content/configuration/config-file/processes.md    |   2
-rw-r--r--  docs/content/usage/widgets/memory.md                   |   2
-rw-r--r--  docs/content/usage/widgets/process.md                  |  11
-rw-r--r--  docs/content/usage/widgets/temperature.md              |   2
-rw-r--r--  sample_configs/default_config.toml                     |   6
-rw-r--r--  src/app.rs                                             |  35
-rw-r--r--  src/app/data_harvester.rs                              |  53
-rw-r--r--  src/app/data_harvester/cpu.rs                          |   4
-rw-r--r--  src/app/data_harvester/memory.rs                       |   3
-rw-r--r--  src/app/data_harvester/memory/gpu.rs                   |  47
-rw-r--r--  src/app/data_harvester/nvidia.rs                       | 145
-rw-r--r--  src/app/data_harvester/processes.rs                    |  18
-rw-r--r--  src/app/data_harvester/processes/linux.rs              |  26
-rw-r--r--  src/app/data_harvester/processes/unix/process_ext.rs   |   6
-rw-r--r--  src/app/data_harvester/processes/windows.rs            |  26
-rw-r--r--  src/app/data_harvester/temperature.rs                  |  13
-rw-r--r--  src/app/data_harvester/temperature/linux.rs            |  18
-rw-r--r--  src/app/data_harvester/temperature/nvidia.rs           |  40
-rw-r--r--  src/app/data_harvester/temperature/sysinfo.rs          |   5
-rw-r--r--  src/app/query.rs                                       | 149
-rw-r--r--  src/args.rs                                            |   6
-rw-r--r--  src/constants.rs                                       |  17
-rw-r--r--  src/options.rs                                         |  30
-rw-r--r--  src/widgets/process_table.rs                           |  49
-rw-r--r--  src/widgets/process_table/proc_widget_column.rs        |  34
-rw-r--r--  src/widgets/process_table/proc_widget_data.rs          |  32
-rw-r--r--  tests/arg_tests.rs                                     |   4
30 files changed, 565 insertions, 224 deletions
diff --git a/Cargo.toml b/Cargo.toml
index 08329e4c..f81ce845 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -93,7 +93,7 @@ indexmap = "2.0.0"
itertools = "0.11.0"
kstring = { version = "2.0.0", features = ["arc"] }
log = { version = "0.4.20", optional = true }
-nvml-wrapper = { version = "0.9.0", optional = true }
+nvml-wrapper = { version = "0.9.0", optional = true, features = ["legacy-functions"] }
once_cell = "1.18.0"
regex = "1.9.4"
serde = { version = "=1.0.188", features = ["derive"] }
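
As context for this one-line change: the `legacy-functions` feature is what exposes the `_v2` process-enumeration calls used in `src/app/data_harvester/nvidia.rs` later in this diff. A minimal sketch of the kind of call it unlocks, assuming an already-initialized `Nvml` handle (illustrative only, not part of the commit):

```rust
use nvml_wrapper::{enums::device::UsedGpuMemory, error::NvmlError, Nvml};

/// List (pid, GPU memory in bytes) via the legacy graphics-process call.
/// Sketch only; `running_graphics_processes_v2` requires the
/// `legacy-functions` feature enabled above.
fn graphics_mem_by_pid(nvml: &Nvml) -> Result<Vec<(u32, u64)>, NvmlError> {
    let device = nvml.device_by_index(0)?;
    Ok(device
        .running_graphics_processes_v2()?
        .into_iter()
        .map(|p| {
            let mem = match p.used_gpu_memory {
                UsedGpuMemory::Used(bytes) => bytes,
                UsedGpuMemory::Unavailable => 0,
            };
            (p.pid, mem)
        })
        .collect())
}
```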
diff --git a/docs/content/configuration/command-line-flags.md b/docs/content/configuration/command-line-flags.md
index e0a7192f..17f78f8a 100644
--- a/docs/content/configuration/command-line-flags.md
+++ b/docs/content/configuration/command-line-flags.md
@@ -20,7 +20,7 @@ see information on these flags by running `btm -h`, or run `btm --help` to displ
| --disable_click | Disables mouse clicks. |
| -m, --dot_marker | Uses a dot marker for graphs. |
| --enable_cache_memory | Enable collecting and displaying cache and buffer memory. |
-| --enable_gpu_memory | Enable collecting and displaying GPU memory usage. |
+| --enable_gpu | Enable collecting and displaying GPU usage. |
| -e, --expanded | Expand the default widget upon starting the app. |
| -f, --fahrenheit | Sets the temperature type to Fahrenheit. |
| -g, --group | Groups processes with the same name by default. |
diff --git a/docs/content/configuration/config-file/flags.md b/docs/content/configuration/config-file/flags.md
index eeca23a3..ed9d0aef 100644
--- a/docs/content/configuration/config-file/flags.md
+++ b/docs/content/configuration/config-file/flags.md
@@ -38,7 +38,7 @@ each time:
| `network_use_binary_prefix` | Boolean | Displays the network widget with binary prefixes. |
| `network_use_bytes` | Boolean | Displays the network widget using bytes. |
| `network_use_log` | Boolean | Displays the network widget with a log scale. |
-| `enable_gpu_memory` | Boolean | Shows the GPU memory widget. |
+| `enable_gpu` | Boolean | Shows the GPU widgets. |
| `retention` | String (human readable time, such as "10m", "1h", etc.) | How much data is stored at once in terms of time. |
| `unnormalized_cpu` | Boolean | Show process CPU% without normalizing over the number of cores. |
| `expanded_on_startup` | Boolean | Expand the default widget upon starting the app. |
diff --git a/docs/content/configuration/config-file/processes.md b/docs/content/configuration/config-file/processes.md
index 2871e1cd..8a2c57b2 100644
--- a/docs/content/configuration/config-file/processes.md
+++ b/docs/content/configuration/config-file/processes.md
@@ -7,5 +7,5 @@ You can configure which columns are shown by the process widget by setting the `
```toml
[processes]
# Pick which columns you want to use in any order.
-columns = ["cpu%", "mem%", "pid", "name", "read", "write", "tread", "twrite", "state", "user", "time"]
+columns = ["cpu%", "mem%", "pid", "name", "read", "write", "tread", "twrite", "state", "user", "time", "gmem%", "gpu%"]
```
diff --git a/docs/content/usage/widgets/memory.md b/docs/content/usage/widgets/memory.md
index 5059d2bf..4304a3f7 100644
--- a/docs/content/usage/widgets/memory.md
+++ b/docs/content/usage/widgets/memory.md
@@ -13,7 +13,7 @@ If the total RAM or swap available is 0, then it is automatically hidden from th
One can also adjust the displayed time range through either the keyboard or mouse, with a range of 30s to 600s.
-This widget can also be configured to display Nvidia GPU memory usage (`--enable_gpu_memory`) or cache memory usage (`--enable_cache_memory`).
+This widget can also be configured to display Nvidia GPU memory usage (`--enable_gpu` on Linux/Windows) or cache memory usage (`--enable_cache_memory`).
## Key bindings
diff --git a/docs/content/usage/widgets/process.md b/docs/content/usage/widgets/process.md
index 2a11e3a7..83139da1 100644
--- a/docs/content/usage/widgets/process.md
+++ b/docs/content/usage/widgets/process.md
@@ -32,6 +32,12 @@ It can also additionally display the following columns:
- Process running time
+With GPU support enabled (`--enable_gpu` on Linux/Windows) and the GPU process columns added to the configuration, the following columns are also available:
+
+- GPU memory use percentage
+- GPU core utilization percentage
+
+
See [the processes configuration page](../../configuration/config-file/processes.md) on how to customize which columns
are shown.
@@ -147,6 +153,9 @@ Note all keywords are case-insensitive. To search for a process/command that col
| `user` | `user=root` | Matches by user; supports regex |
| `state` | `state=running` | Matches by state; supports regex |
| `()` | `(<COND 1> AND <COND 2>) OR <COND 3>` | Group together a condition |
+| `gmem` | `gmem > 1000 b` | Matches the GPU memory column in terms of bytes; supports comparison operators |
+| `gmem%` | `gmem% < 0.5` | Matches the GPU memory column in terms of percent; supports comparison operators |
+| `gpu%` | `gpu% > 0` | Matches the GPU usage column in terms of percent; supports comparison operators |
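
As an illustration of how the new keywords combine with the existing grouping and comparison syntax, a query such as `(gmem > 1000 b) and gpu% > 0` should match processes that both hold GPU memory and show nonzero GPU utilization.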
#### Comparison operators
@@ -207,6 +216,8 @@ Note that key bindings are generally case-sensitive.
| ++I++ | Invert the current sort |
| ++"%"++ | Toggle between values and percentages for memory usage |
| ++t++ , ++f5++ | Toggle tree mode |
+| ++M++ | Sort by GPU memory usage; press again to reverse the sorting order |
+| ++C++ | Sort by GPU usage; press again to reverse the sorting order |
### Sort sub-widget
diff --git a/docs/content/usage/widgets/temperature.md b/docs/content/usage/widgets/temperature.md
index 34d28c98..3cadc9a8 100644
--- a/docs/content/usage/widgets/temperature.md
+++ b/docs/content/usage/widgets/temperature.md
@@ -10,6 +10,8 @@ The temperature widget provides a table of temperature sensors and their current
The temperature widget provides the sensor name as well as its current temperature.
+This widget can also be configured to display Nvidia GPU temperatures (`--enable_gpu` on Linux/Windows).
+
## Key bindings
Note that key bindings are generally case-sensitive.
diff --git a/sample_configs/default_config.toml b/sample_configs/default_config.toml
index e7b4058e..7d6a9f28 100644
--- a/sample_configs/default_config.toml
+++ b/sample_configs/default_config.toml
@@ -74,7 +74,7 @@
# Hides advanced options to stop a process on Unix-like systems.
#disable_advanced_kill = false
# Shows GPU(s) memory
-#enable_gpu_memory = false
+#enable_gpu = false
# Shows cache and buffer memory
#enable_cache_memory = false
# How much data is stored at once in terms of time.
@@ -83,7 +83,7 @@
# These are flags around the process widget.
#[processes]
-#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State"]
+#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State", "GMEM%", "GPU%"]
# These are all the components that support custom theming. Note that colour support
# will depend on terminal support.
@@ -103,7 +103,7 @@
#swap_color="LightYellow"
# Represents the colour ARC will use in the memory legend and graph.
#arc_color="LightCyan"
-# Represents the colour the GPU will use in the memory legend and graph.
+# Represents the colour the GPU will use in the legend and graph.
#gpu_core_colors=["LightGreen", "LightBlue", "LightRed", "Cyan", "Green", "Blue", "Red"]
# Represents the colour rx will use in the network legend and graph.
#rx_color="LightCyan"
diff --git a/src/app.rs b/src/app.rs
index 468eeed8..32f0af06 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -60,7 +60,7 @@ pub struct AppConfigFields {
pub use_old_network_legend: bool,
pub table_gap: u16,
pub disable_click: bool,
- pub enable_gpu_memory: bool,
+ pub enable_gpu: bool,
pub enable_cache_memory: bool,
pub show_table_scroll_position: bool,
pub is_advanced_kill: bool,
@@ -1277,6 +1277,30 @@ impl App {
disk.set_index(3);
}
}
+ #[cfg(feature = "gpu")]
+ 'M' => {
+ if let BottomWidgetType::Proc = self.current_widget.widget_type {
+ if let Some(proc_widget_state) = self
+ .states
+ .proc_state
+ .get_mut_widget_state(self.current_widget.widget_id)
+ {
+ proc_widget_state.select_column(ProcWidgetColumn::GpuMem);
+ }
+ }
+ }
+ #[cfg(feature = "gpu")]
+ 'C' => {
+ if let BottomWidgetType::Proc = self.current_widget.widget_type {
+ if let Some(proc_widget_state) = self
+ .states
+ .proc_state
+ .get_mut_widget_state(self.current_widget.widget_id)
+ {
+ proc_widget_state.select_column(ProcWidgetColumn::GpuUtil);
+ }
+ }
+ }
'?' => {
self.help_dialog_state.is_showing_help = true;
self.is_force_redraw = true;
@@ -2702,7 +2726,14 @@ impl App {
{
if (x >= *tlc_x && y >= *tlc_y) && (x <= *brc_x && y <= *brc_y)
{
- battery_widget_state.currently_selected_battery_index = itx;
+ if itx >= self.converted_data.battery_data.len() {
+ // range check to keep within current data
+ battery_widget_state.currently_selected_battery_index =
+ self.converted_data.battery_data.len() - 1;
+ } else {
+ battery_widget_state.currently_selected_battery_index =
+ itx;
+ }
break;
}
}
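
A side note on the bounds check added above: the same clamping can be written more compactly. A sketch, assuming only that the clicked index and the length of `battery_data` are available (hypothetical helper, not part of the commit):

```rust
/// Clamp a clicked battery tab index to the range of available battery data.
/// Equivalent to the if/else in the hunk above, and it also tolerates an
/// empty data vector thanks to `saturating_sub`.
fn clamp_battery_index(clicked: usize, data_len: usize) -> usize {
    clicked.min(data_len.saturating_sub(1))
}
```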
diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs
index e4571597..d140e29e 100644
--- a/src/app/data_harvester.rs
+++ b/src/app/data_harvester.rs
@@ -2,7 +2,7 @@
use std::time::{Duration, Instant};
-#[cfg(target_os = "linux")]
+#[cfg(any(target_os = "linux", feature = "gpu"))]
use hashbrown::HashMap;
#[cfg(feature = "battery")]
use starship_battery::{Battery, Manager};
@@ -125,6 +125,11 @@ pub struct DataCollector {
#[cfg(target_family = "unix")]
user_table: processes::UserTable,
+
+ #[cfg(feature = "gpu")]
+ gpu_pids: Option<Vec<HashMap<u32, (u64, u32)>>>,
+ #[cfg(feature = "gpu")]
+ gpus_total_mem: Option<u64>,
}
impl DataCollector {
@@ -153,6 +158,10 @@ impl DataCollector {
filters,
#[cfg(target_family = "unix")]
user_table: Default::default(),
+ #[cfg(feature = "gpu")]
+ gpu_pids: None,
+ #[cfg(feature = "gpu")]
+ gpus_total_mem: None,
}
}
@@ -288,18 +297,47 @@ impl DataCollector {
self.update_cpu_usage();
self.update_memory_usage();
- self.update_processes();
self.update_temps();
- self.update_network_usage();
- self.update_disks();
-
#[cfg(feature = "battery")]
self.update_batteries();
+ #[cfg(feature = "gpu")]
+ self.update_gpus(); // Must run before process collection (to populate gpu_pids) but after temperature collection (so GPU sensors are appended).
+ self.update_processes();
+ self.update_network_usage();
+ self.update_disks();
// Update times for future reference.
self.last_collection_time = self.data.collection_time;
}
+ #[cfg(feature = "gpu")]
+ #[inline]
+ fn update_gpus(&mut self) {
+ if self.widgets_to_harvest.use_gpu {
+ #[cfg(feature = "nvidia")]
+ if let Some(data) = nvidia::get_nvidia_vecs(
+ &self.temperature_type,
+ &self.filters.temp_filter,
+ &self.widgets_to_harvest,
+ ) {
+ if let Some(mut temp) = data.temperature {
+ if let Some(sensors) = &mut self.data.temperature_sensors {
+ sensors.append(&mut temp);
+ } else {
+ self.data.temperature_sensors = Some(temp);
+ }
+ }
+ if let Some(mem) = data.memory {
+ self.data.gpu = Some(mem);
+ }
+ if let Some(proc) = data.procs {
+ self.gpu_pids = Some(proc.1);
+ self.gpus_total_mem = Some(proc.0);
+ }
+ }
+ }
+ }
+
#[inline]
fn update_cpu_usage(&mut self) {
if self.widgets_to_harvest.use_cpu {
@@ -365,11 +403,6 @@ impl DataCollector {
{
self.data.arc = memory::arc::get_arc_usage();
}
-
- #[cfg(feature = "gpu")]
- if self.widgets_to_harvest.use_gpu {
- self.data.gpu = memory::gpu::get_gpu_mem_usage();
- }
}
}
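
For orientation, each element of the new `gpu_pids` field is a per-GPU map from pid to `(GPU memory in bytes, GPU utilization in percent)`, and `gpus_total_mem` is the total memory across devices. A minimal sketch of how a consumer folds the per-GPU maps into a single per-process total, mirroring what the platform-specific process code does later in this diff (illustrative helper, not part of the commit):

```rust
use hashbrown::HashMap;

/// Sum GPU memory (bytes) and utilization (%) for `pid` across all GPUs.
fn gpu_usage_for_pid(gpu_pids: &[HashMap<u32, (u64, u32)>], pid: u32) -> (u64, u32) {
    gpu_pids.iter().fold((0, 0), |(mem, util), per_gpu| {
        match per_gpu.get(&pid) {
            Some((m, u)) => (mem + m, util + u),
            None => (mem, util),
        }
    })
}
```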
diff --git a/src/app/data_harvester/cpu.rs b/src/app/data_harvester/cpu.rs
index a827b26c..843df161 100644
--- a/src/app/data_harvester/cpu.rs
+++ b/src/app/data_harvester/cpu.rs
@@ -1,8 +1,4 @@
//! Data collection for CPU usage and load average.
-//!
-//! For CPU usage, Linux, macOS, and Windows are handled by Heim, FreeBSD by sysinfo.
-//!
-//! For load average, macOS and Linux are supported through Heim, FreeBSD by sysinfo.
pub mod sysinfo;
pub use self::sysinfo::*;
diff --git a/src/app/data_harvester/memory.rs b/src/app/data_harvester/memory.rs
index 2154b00c..dee65d8d 100644
--- a/src/app/data_harvester/memory.rs
+++ b/src/app/data_harvester/memory.rs
@@ -15,9 +15,6 @@ cfg_if::cfg_if! {
}
}
-#[cfg(feature = "gpu")]
-pub mod gpu;
-
#[cfg(feature = "zfs")]
pub mod arc;
diff --git a/src/app/data_harvester/memory/gpu.rs b/src/app/data_harvester/memory/gpu.rs
deleted file mode 100644
index 6fd66ba3..00000000
--- a/src/app/data_harvester/memory/gpu.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use super::MemHarvest;
-
-/// Return GPU memory usage.
-#[cfg(feature = "gpu")]
-pub(crate) fn get_gpu_mem_usage() -> Option<Vec<(String, MemHarvest)>> {
- // As we add more support, expand on this.
-
- #[cfg(feature = "nvidia")]
- get_nvidia_mem_usage()
-}
-
-/// Returns the memory usage of NVIDIA cards.
-#[inline]
-#[cfg(feature = "nvidia")]
-fn get_nvidia_mem_usage() -> Option<Vec<(String, MemHarvest)>> {
- use crate::data_harvester::nvidia::NVML_DATA;
-
- if let Ok(nvml) = &*NVML_DATA {
- if let Ok(num_gpu) = nvml.device_count() {
- let mut results = Vec::with_capacity(num_gpu as usize);
- for i in 0..num_gpu {
- if let Ok(device) = nvml.device_by_index(i) {
- if let (Ok(name), Ok(mem)) = (device.name(), device.memory_info()) {
- // add device memory in bytes
- results.push((
- name,
- MemHarvest {
- total_bytes: mem.total,
- used_bytes: mem.used,
- use_percent: if mem.total == 0 {
- None
- } else {
- Some(mem.used as f64 / mem.total as f64 * 100.0)
- },
- },
- ));
- }
- }
- }
- Some(results)
- } else {
- None
- }
- } else {
- None
- }
-}
diff --git a/src/app/data_harvester/nvidia.rs b/src/app/data_harvester/nvidia.rs
index 9619da64..19abdb9e 100644
--- a/src/app/data_harvester/nvidia.rs
+++ b/src/app/data_harvester/nvidia.rs
@@ -1,4 +1,149 @@
+use hashbrown::HashMap;
+use nvml_wrapper::enum_wrappers::device::TemperatureSensor;
+use nvml_wrapper::enums::device::UsedGpuMemory;
use nvml_wrapper::{error::NvmlError, Nvml};
use once_cell::sync::Lazy;
+use crate::app::Filter;
+
+use crate::app::layout_manager::UsedWidgets;
+use crate::data_harvester::memory::MemHarvest;
+use crate::data_harvester::temperature::{
+ convert_temp_unit, is_temp_filtered, TempHarvest, TemperatureType,
+};
+
pub static NVML_DATA: Lazy<Result<Nvml, NvmlError>> = Lazy::new(Nvml::init);
+
+pub struct GpusData {
+ pub memory: Option<Vec<(String, MemHarvest)>>,
+ pub temperature: Option<Vec<TempHarvest>>,
+ pub procs: Option<(u64, Vec<HashMap<u32, (u64, u32)>>)>,
+}
+
+/// Returns the GPU data from NVIDIA cards.
+#[inline]
+pub fn get_nvidia_vecs(
+ temp_type: &TemperatureType, filter: &Option<Filter>, widgets_to_harvest: &UsedWidgets,
+) -> Option<GpusData> {
+ if let Ok(nvml) = &*NVML_DATA {
+ if let Ok(num_gpu) = nvml.device_count() {
+ let mut temp_vec = Vec::with_capacity(num_gpu as usize);
+ let mut mem_vec = Vec::with_capacity(num_gpu as usize);
+ let mut proc_vec = Vec::with_capacity(num_gpu as usize);
+ let mut total_mem = 0;
+ for i in 0..num_gpu {
+ if let Ok(device) = nvml.device_by_index(i) {
+ if let Ok(name) = device.name() {
+ if widgets_to_harvest.use_mem {
+ if let Ok(mem) = device.memory_info() {
+ mem_vec.push((
+ name.clone(),
+ MemHarvest {
+ total_bytes: mem.total,
+ used_bytes: mem.used,
+ use_percent: if mem.total == 0 {
+ None
+ } else {
+ Some(mem.used as f64 / mem.total as f64 * 100.0)
+ },
+ },
+ ));
+ }
+ }
+ if widgets_to_harvest.use_temp && is_temp_filtered(filter, &name) {
+ if let Ok(temperature) = device.temperature(TemperatureSensor::Gpu) {
+ let temperature = temperature as f32;
+ let temperature = convert_temp_unit(temperature, temp_type);
+ temp_vec.push(TempHarvest {
+ name: name.clone(),
+ temperature,
+ });
+ }
+ }
+ }
+ if widgets_to_harvest.use_proc {
+ let mut procs = HashMap::new();
+ if let Ok(gpu_procs) = device.process_utilization_stats(None) {
+ for proc in gpu_procs {
+ let pid = proc.pid;
+ let gpu_util = proc.sm_util + proc.enc_util + proc.dec_util;
+ procs.insert(pid, (0, gpu_util));
+ }
+ }
+ if let Ok(compute_procs) = device.running_compute_processes() {
+ for proc in compute_procs {
+ let pid = proc.pid;
+ let gpu_mem = match proc.used_gpu_memory {
+ UsedGpuMemory::Used(val) => val,
+ UsedGpuMemory::Unavailable => 0,
+ };
+ if let Some(prev) = procs.get(&pid) {
+ procs.insert(pid, (gpu_mem, prev.1));
+ } else {
+ procs.insert(pid, (gpu_mem, 0));
+ }
+ }
+ }
+ // Use the legacy API too but prefer newer API results
+ if let Ok(graphics_procs) = device.running_graphics_processes_v2() {
+ for proc in graphics_procs {
+ let pid = proc.pid;
+ let gpu_mem = match proc.used_gpu_memory {
+ UsedGpuMemory::Used(val) => val,
+ UsedGpuMemory::Unavailable => 0,
+ };
+ if let Some(prev) = procs.get(&pid) {
+ procs.insert(pid, (gpu_mem, prev.1));
+ } else {
+ procs.insert(pid, (gpu_mem, 0));
+ }
+ }
+ }
+ if let Ok(graphics_procs) = device.running_graphics_processes() {
+ for proc in graphics_procs {
+ let pid = proc.pid;
+ let gpu_mem = match proc.used_gpu_memory {
+ UsedGpuMemory::Used(val) => val,
+ UsedGpuMemory::Unavailable => 0,
+ };
+ if let Some(prev) = procs.get(&pid) {
+ procs.insert(pid, (gpu_mem, prev.1));
+ } else {
+ procs.insert(pid, (gpu_mem, 0));
+ }
+ }
+ }
+ if !procs.is_empty() {
+ proc_vec.push(procs);
+ }
+ // running total for proc %
+ if let Ok(mem) = device.memory_info() {
+ total_mem += mem.total;
+ }
+ }
+ }
+ }
+ Some(GpusData {
+ memory: if !mem_vec.is_empty() {
+ Some(mem_vec)
+ } else {
+ None
+ },
+ temperature: if !temp_vec.is_empty() {
+ Some(temp_vec)
+ } else {
+ None
+ },
+ procs: if !proc_vec.is_empty() {
+ Some((total_mem, proc_vec))
+ } else {
+ None
+ },
+ })
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+}
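
The three process loops above share the same "record memory, keep any utilization seen earlier" step; as a hedged aside, that step could also be expressed with the map's `entry` API (same `procs: HashMap<u32, (u64, u32)>` as above, illustrative only):

```rust
use hashbrown::HashMap;
use nvml_wrapper::enums::device::UsedGpuMemory;

/// Record a process's GPU memory, preserving any previously recorded utilization.
fn record_proc_memory(procs: &mut HashMap<u32, (u64, u32)>, pid: u32, used: &UsedGpuMemory) {
    let gpu_mem = match used {
        UsedGpuMemory::Used(bytes) => *bytes,
        UsedGpuMemory::Unavailable => 0,
    };
    procs
        .entry(pid)
        .and_modify(|entry| entry.0 = gpu_mem)
        .or_insert((gpu_mem, 0));
}
```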
diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs
index 5d14930e..7d042635 100644
--- a/src/app/data_harvester/processes.rs
+++ b/src/app/data_harvester/processes.rs
@@ -83,6 +83,18 @@ pub struct ProcessHarvest {
/// This is the process' user.
pub user: Cow<'static, str>,
+
+ /// Gpu memory usage as bytes.
+ #[cfg(feature = "gpu")]
+ pub gpu_mem: u64,
+
+ /// Gpu memory usage as percentage.
+ #[cfg(feature = "gpu")]
+ pub gpu_mem_percent: f32,
+
+ /// Gpu utilization as a percentage.
+ #[cfg(feature = "gpu")]
+ pub gpu_util: u32,
// TODO: Additional fields
// pub rss_kb: u64,
// pub virt_kb: u64,
@@ -98,6 +110,12 @@ impl ProcessHarvest {
self.total_read_bytes += rhs.total_read_bytes;
self.total_write_bytes += rhs.total_write_bytes;
self.time += rhs.time;
+ #[cfg(feature = "gpu")]
+ {
+ self.gpu_mem += rhs.gpu_mem;
+ self.gpu_util += rhs.gpu_util;
+ self.gpu_mem_percent += rhs.gpu_mem_percent;
+ }
}
}
diff --git a/src/app/data_harvester/processes/linux.rs b/src/app/data_harvester/processes/linux.rs
index 8f50823b..0a1ed539 100644
--- a/src/app/data_harvester/processes/linux.rs
+++ b/src/app/data_harvester/processes/linux.rs
@@ -250,6 +250,12 @@ fn read_proc(
uid,
user,
time,
+ #[cfg(feature = "gpu")]
+ gpu_mem: 0,
+ #[cfg(feature = "gpu")]
+ gpu_mem_percent: 0.0,
+ #[cfg(feature = "gpu")]
+ gpu_util: 0,
},
new_process_times,
))
@@ -326,7 +332,8 @@ pub(crate) fn linux_process_data(
let pid = process.pid;
let prev_proc_details = pid_mapping.entry(pid).or_default();
- if let Ok((process_harvest, new_process_times)) = read_proc(
+ #[allow(unused_mut)]
+ if let Ok((mut process_harvest, new_process_times)) = read_proc(
prev_proc_details,
process,
cpu_usage,
@@ -336,6 +343,23 @@ pub(crate) fn linux_process_data(
total_memory,
user_table,
) {
+ #[cfg(feature = "gpu")]
+ if let Some(gpus) = &collector.gpu_pids {
+ gpus.iter().for_each(|gpu| {
+ // Accumulate GPU memory and utilization for this pid across all GPUs.
+ if let Some((mem, util)) = gpu.get(&(pid as u32)) {
+ process_harvest.gpu_mem += mem;
+ process_harvest.gpu_util += util;
+ }
+ });
+ if let Some(gpu_total_mem) = &collector.gpus_total_mem {
+ process_harvest.gpu_mem_percent = (process_harvest.gpu_mem as f64
+ / *gpu_total_mem as f64
+ * 100.0)
+ as f32;
+ }
+ }
+
prev_proc_details.cpu_time = new_process_times;
prev_proc_details.total_read_bytes = process_harvest.total_read_bytes;
prev_proc_details.total_write_bytes = process_harvest.total_write_bytes;
diff --git a/src/app/data_harvester/processes/unix/process_ext.rs b/src/app/data_harvester/processes/unix/process_ext.rs
index 999a893f..93ef3a94 100644
--- a/src/app/data_harvester/processes/unix/process_ext.rs
+++ b/src/app/data_harvester/processes/unix/process_ext.rs
@@ -97,6 +97,12 @@ pub(crate) trait UnixProcessExt {
})
.unwrap_or_else(|| "N/A".into()),
time: Duration::from_secs(process_val.run_time()),
+ #[cfg(feature = "gpu")]
+ gpu_mem: 0,
+ #[cfg(feature = "gpu")]
+ gpu_mem_percent: 0.0,
+ #[cfg(feature = "gpu")]
+ gpu_util: 0,
});
}
diff --git a/src/app/data_harvester/processes/windows.rs b/src/app/data_harvester/processes/windows.rs