path: root/src/utils
author    Clement Tsang <34804052+ClementTsang@users.noreply.github.com>    2022-11-29 03:53:58 -0500
committer GitHub <noreply@github.com>    2022-11-29 03:53:58 -0500
commit    9c3e60e74f0af93ed4ad7e8534db54e10219526a (patch)
tree      b8b3a43c4ee0053560233b3a37578128f13b3c3d /src/utils
parent    913c9ed5c67113ac3997b165f82cfd9543f7b572 (diff)
other: slightly reduce the CPU time spent for draws (#918)
* other: group all dataset draws in a time chart

  We used to draw each dataset separately as a new canvas. Now we draw all
  datasets in a single canvas (see the sketch below). Note that this changes
  how dataset lines are drawn - rather than drawing one dataset on top of
  another, they are all drawn at once. IMO this looks slightly better, though
  it might also look a bit more cluttered.

* other: optimize truncate_text

  Flamegraphs showed that this area can be heavy at times, with some
  inefficient use of iterators and collections. This change should optimize
  it a bit by reducing some collections and reallocations. There is room for
  further optimization by avoiding some allocations on the caller side.

* Reduce some redundant draws
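The canvas-grouping change itself lives outside src/utils, so it does not appear in the diff below. As a rough illustration only (assuming tui's Canvas and Points APIs; the function name, bounds, and color here are made up for the sketch and are not bottom's actual drawing code), drawing every dataset inside one paint pass looks roughly like this:

use tui::backend::Backend;
use tui::layout::Rect;
use tui::style::Color;
use tui::widgets::canvas::{Canvas, Points};
use tui::Frame;

// Hypothetical sketch: one Canvas for the whole chart, with every dataset drawn
// inside the same paint pass, instead of rendering a separate Canvas per dataset.
fn draw_all_datasets<B: Backend>(f: &mut Frame<'_, B>, area: Rect, datasets: &[&[(f64, f64)]]) {
    let canvas = Canvas::default()
        // Bounds are made up for the sketch; a real chart derives them from its data.
        .x_bounds([0.0, 100.0])
        .y_bounds([0.0, 100.0])
        .paint(|ctx| {
            for data in datasets.iter().copied() {
                ctx.draw(&Points {
                    coords: data,
                    color: Color::Cyan,
                });
            }
        });
    f.render_widget(canvas, area);
}

The draw-time saving presumably comes from the single render pass: one canvas covers all datasets, instead of one canvas render per dataset.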
Diffstat (limited to 'src/utils')
-rw-r--r--  src/utils/gen_util.rs  | 45
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/src/utils/gen_util.rs b/src/utils/gen_util.rs
index ab8c8479..60311105 100644
--- a/src/utils/gen_util.rs
+++ b/src/utils/gen_util.rs
@@ -1,7 +1,6 @@
 use std::cmp::Ordering;
 
-use concat_string::concat_string;
-use tui::text::Text;
+use tui::text::{Span, Spans, Text};
 use unicode_segmentation::UnicodeSegmentation;
 
 pub const KILO_LIMIT: u64 = 1000;
@@ -99,14 +98,37 @@ pub fn get_decimal_prefix(quantity: u64, unit: &str) -> (f64, String) {
 /// Truncates text if it is too long, and adds an ellipsis at the end if needed.
 pub fn truncate_text<'a, U: Into<usize>>(content: &str, width: U) -> Text<'a> {
     let width = width.into();
-    let graphemes: Vec<&str> = UnicodeSegmentation::graphemes(content, true).collect();
-
-    if graphemes.len() > width && width > 0 {
-        // Truncate with ellipsis
-        let first_n = graphemes[..(width - 1)].concat();
-        Text::raw(concat_string!(first_n, "…"))
+    let mut graphemes = UnicodeSegmentation::graphemes(content, true);
+    let grapheme_len = {
+        let (_, upper) = graphemes.size_hint();
+        match upper {
+            Some(upper) => upper,
+            None => graphemes.clone().count(), // Don't think this ever fires.
+        }
+    };
+
+    let text = if grapheme_len > width {
+        let mut text = String::with_capacity(width);
+        // Truncate with ellipsis.
+
+        // Use a hack to reduce the size down to `width` graphemes. Think of it
+        // like removing the last `grapheme_len - width` graphemes, which cuts
+        // the length down to `width`.
+        //
+        // This is a way to get around the currently experimental `advance_back_by`.
+        graphemes.nth_back(grapheme_len - width);
+
+        text.push_str(graphemes.as_str());
+        text.push('…');
+
+        text
     } else {
-        Text::raw(content.to_string())
+        content.to_string()
+    };
+
+    // TODO: [OPT] maybe add interning here?
+    Text {
+        lines: vec![Spans(vec![Span::raw(text)])],
     }
 }
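As an aside on the nth_back call above: DoubleEndedIterator::nth_back(n) consumes n + 1 items from the back, so after the call only the first width - 1 graphemes remain, and appending the ellipsis brings the rendered width back up to width. A small standalone sketch of that arithmetic (illustrative only, not part of the diff, using the same unicode-segmentation crate):

use unicode_segmentation::UnicodeSegmentation;

fn main() {
    // "abcdef" is 6 graphemes; pretend the target width is 4.
    let (grapheme_len, width) = (6, 4);
    let mut graphemes = UnicodeSegmentation::graphemes("abcdef", true);

    // nth_back(2) consumes "f", "e", and "d" - three graphemes - from the back,
    // leaving only the first `width - 1` graphemes in the iterator.
    graphemes.nth_back(grapheme_len - width);
    assert_eq!(graphemes.as_str(), "abc");

    // Appending the ellipsis brings the rendered width back up to `width`.
    println!("{}…", graphemes.as_str()); // prints "abc…"
}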
@@ -156,4 +178,9 @@ mod test {
         y.sort_by(|a, b| sort_partial_fn(true)(a, b));
         assert_eq!(y, vec![16.15, 15.0, 1.0, -1.0, -100.0, -100.0, -100.1]);
     }
+
+    #[test]
+    fn test_truncation() {
+        // TODO: Add tests for `truncate_text`
+    }
 }
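The TODO above is left empty in this commit. A hedged sketch of what such tests could look like if placed alongside the existing test module in gen_util.rs; it assumes the new truncate_text behaviour shown above and that tui::text::Text implements PartialEq, and the module name, test names, and cases are illustrative rather than anything from the actual commit:

#[cfg(test)]
mod truncate_text_sketch {
    use super::*;
    use tui::text::Text;

    // Illustrative cases only; widths are in graphemes, and the truncated form
    // keeps `width - 1` graphemes plus the ellipsis.
    #[test]
    fn truncates_long_text_with_ellipsis() {
        assert_eq!(truncate_text("hello", 4_usize), Text::raw("hel…"));
    }

    #[test]
    fn leaves_short_text_untouched() {
        assert_eq!(truncate_text("hi", 4_usize), Text::raw("hi"));
    }

    #[test]
    fn counts_graphemes_not_bytes() {
        // "こんにちは" is 5 graphemes but 15 bytes; truncation is grapheme-aware.
        assert_eq!(truncate_text("こんにちは", 3_usize), Text::raw("こん…"));
    }
}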