summaryrefslogtreecommitdiffstats
path: root/src/app/data_harvester/disks.rs
blob: 2d606c1499d4286c56d87befc61f2e75fc5b1abb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
#[cfg(not(any(target_arch = "aarch64", target_arch = "arm")))]
use futures::stream::StreamExt;

/// Filesystem usage snapshot for a single mounted disk/partition.
///
/// All `*_space` values are in bytes (see the `information::byte` unit
/// conversions in `get_disk_usage`).
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct DiskHarvest {
    /// Device name (e.g. `/dev/sda1`), or "Name Unavailable" if unknown.
    pub name: String,
    /// Path the partition is mounted at.
    pub mount_point: String,
    /// Free space remaining, in bytes.
    pub free_space: u64,
    /// Space currently used, in bytes.
    pub used_space: u64,
    /// Total capacity, in bytes.
    pub total_space: u64,
}

/// Cumulative I/O counters for a single device, as harvested by `get_io_usage`.
// Derives `Default`/`PartialEq`/`Eq` for consistency with `DiskHarvest`.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub struct IOData {
    /// Total bytes read from the device.
    pub read_bytes: u64,
    /// Total bytes written to the device.
    pub write_bytes: u64,
}

pub type IOHarvest = std::collections::HashMap<String, Option<IOData>>;

/// Stub I/O harvester for ARM targets: always reports an empty harvest.
#[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
pub async fn get_io_usage(
    _sys: &sysinfo::System, _actually_get: bool,
) -> crate::utils::error::Result<Option<IOHarvest>> {
    // TODO: Sysinfo disk I/O usage.
    // ...sadly, this cannot be done as of now (other than me writing my own), it requires further
    // work.  See https://github.com/GuillaumeGomez/sysinfo/issues/304.

    // Until then, hand back an empty-but-present harvest so callers simply see
    // no per-device entries.
    Ok(Some(IOHarvest::new()))
}

/// Harvests per-disk filesystem usage on ARM targets via sysinfo.
///
/// Returns `Ok(None)` when `actually_get` is false (widget disabled);
/// otherwise one `DiskHarvest` per disk, sorted by device name.
#[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
pub async fn get_disk_usage(
    sys: &sysinfo::System, actually_get: bool,
) -> crate::utils::error::Result<Option<Vec<DiskHarvest>>> {
    use sysinfo::{DiskExt, SystemExt};

    if !actually_get {
        return Ok(None);
    }

    let mut disks: Vec<DiskHarvest> = Vec::new();
    for disk in sys.get_disks() {
        let total = disk.get_total_space();
        let available = disk.get_available_space();
        disks.push(DiskHarvest {
            name: disk.get_name().to_string_lossy().into(),
            mount_point: disk.get_mount_point().to_string_lossy().into(),
            free_space: available,
            // Saturating: guards against `available > total` ever reported.
            used_space: total.saturating_sub(available),
            total_space: total,
        });
    }
    disks.sort_by(|lhs, rhs| lhs.name.cmp(&rhs.name));

    Ok(Some(disks))
}

#[cfg(not(any(target_arch = "aarch64", target_arch = "arm")))]
pub async fn get_io_usage(
    get_physical: bool, actually_get: bool,
) -> crate::utils::error::Result<Option<IOHarvest>> {
    if !actually_get {
        return Ok(None);
    }

    let mut io_hash: std::collections::HashMap<String, Option<IOData>> =
        std::collections::HashMap::new();
    if get_physical {
        let mut physical_counter_stream = heim::disk::io_counters_physical();
        while let Some(io) = physical_counter_stream.next().await {
            if let Ok(io) = io {
                let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable");
                io_hash.insert(
                    mount_point.to_string(),
                    Some(IOData {
                        read_bytes: io.read_bytes().get::<heim::units::information::megabyte>(),
                        write_bytes: io.write_bytes().get::<heim::units::information::megabyte>(),
                    }),
                );
            }
        }
    } else {
        let mut counter_stream = heim::disk::io_counters();
        while let Some(io) = counter_stream.next().await {
            if let Ok(io) = io {
                let mount_point = io.device_name().to_str().unwrap_or("Name Unavailable");
                io_hash.insert(
                    mount_point.to_string(),
                    Some(IOData {
                        read_bytes: io.read_bytes().get::<heim::units::information::byte>(),
                        write_bytes: io.write_bytes().get::<heim::units::information::byte>(),
                    }),
                );
            }
        }
    }

    Ok(Some(io_hash))
}

/// Harvests per-partition filesystem usage via heim.
///
/// Returns `Ok(None)` when `actually_get` is false (widget disabled);
/// otherwise one `DiskHarvest` per physical partition, sorted by device
/// name. Partitions that fail to enumerate are skipped; a usage-query
/// failure propagates as an error.
#[cfg(not(any(target_arch = "aarch64", target_arch = "arm")))]
pub async fn get_disk_usage(
    actually_get: bool,
) -> crate::utils::error::Result<Option<Vec<DiskHarvest>>> {
    if !actually_get {
        return Ok(None);
    }

    let mut disks: Vec<DiskHarvest> = Vec::new();
    let mut partitions_stream = heim::disk::partitions_physical();

    while let Some(part) = partitions_stream.next().await {
        if let Ok(partition) = part {
            let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?;

            let mount_point = partition
                .mount_point()
                .to_str()
                .unwrap_or("Name Unavailable")
                .to_string();
            let name = partition
                .device()
                .unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable"))
                .to_str()
                .unwrap_or("Name Unavailable")
                .to_string();

            disks.push(DiskHarvest {
                name,
                mount_point,
                free_space: usage.free().get::<heim::units::information::byte>(),
                used_space: usage.used().get::<heim::units::information::byte>(),
                total_space: usage.total().get::<heim::units::information::byte>(),
            });
        }
    }

    disks.sort_by(|a, b| a.name.cmp(&b.name));

    Ok(Some(disks))
}