author    rabite <rabite@posteo.de>    2020-02-21 15:17:46 +0100
committer rabite <rabite@posteo.de>    2020-05-24 21:02:46 +0200
commit    5a176f8ac52f88febc176658e74e3ea5a34f0023 (patch)
tree      e80377ee998d4c6d833939b247a1353ccb3e9d53
parent    b9f6e9c486f5846843359a09623cb022a5c53560 (diff)
barely working but fast
-rw-r--r--  Cargo.lock            28
-rw-r--r--  Cargo.toml             4
-rw-r--r--  src/fail.rs            2
-rw-r--r--  src/file_browser.rs   13
-rw-r--r--  src/files.rs         438
-rw-r--r--  src/fscache.rs        38
-rw-r--r--  src/listview.rs       81
-rw-r--r--  src/main.rs            5
-rw-r--r--  src/preview.rs         3
-rw-r--r--  src/term.rs            2
10 files changed, 415 insertions, 199 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 6bbbaf4..b80f4c1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -153,6 +153,11 @@ dependencies = [
]
[[package]]
+name = "bumpalo"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "byteorder"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -316,6 +321,14 @@ dependencies = [
]
[[package]]
+name = "dmsort"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "pdqsort 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "either"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -613,11 +626,13 @@ version = "1.3.5"
dependencies = [
"async_value 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"derivative 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"dirs-2 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "dmsort 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"gstreamer 0.14.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -644,6 +659,7 @@ dependencies = [
"signal-notify 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sixel 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"sixel-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "splay_tree 0.2.10",
"strip-ansi-escapes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"strum 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
"strum_macros 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1057,6 +1073,11 @@ dependencies = [
]
[[package]]
+name = "pdqsort"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "petgraph"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1428,6 +1449,10 @@ dependencies = [
]
[[package]]
+name = "splay_tree"
+version = "0.2.10"
+
+[[package]]
name = "strip-ansi-escapes"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1729,6 +1754,7 @@ dependencies = [
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"
"checksum blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a"
+"checksum bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742"
"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
"checksum bytesize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "716960a18f978640f25101b5cbf1c6f6b0d3192fab36a2d98ca96f0ecbe41010"
"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
@@ -1748,6 +1774,7 @@ dependencies = [
"checksum derivative 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "942ca430eef7a3806595a6737bc388bf51adb888d3fc0dd1b50f1c170167ee3a"
"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901"
"checksum dirs-2 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50b7e2b65c73137ec48935d50a5ae89b03150df566b7e14a1371df044e76765c"
+"checksum dmsort 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b99ba5967777d43de658426fb0263211abff58faf85b091fd2eb0bbd11499a"
"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9"
"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08"
@@ -1824,6 +1851,7 @@ dependencies = [
"checksum parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c"
"checksum parse-ansi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "50c48b4d524f8a10bf6ab37dc0b7583f17c8ec88b617b364ddfc3baee4dcf878"
"checksum pathbuftools 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "00f002a88874c85e30d4e133baae43cf382ceecf025f9493ce23eb381fcc922e"
+"checksum pdqsort 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ceca1642c89148ca05611cc775a0c383abef355fc4907c4e95f49f7b09d6287c"
"checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f"
"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"
"checksum png 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "63daf481fdd0defa2d1d2be15c674fbfa1b0fd71882c303a91f9a79b3252c359"
diff --git a/Cargo.toml b/Cargo.toml
index f635cbe..ac0a739 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -46,7 +46,9 @@ itertools = "0.8"
nix = "0.17"
strip-ansi-escapes = "0.1"
crossbeam = "0.7"
-
+bumpalo = { version = "*", features = [ "collections" ] }
+splay_tree = { path = "../splay_tree" }
+dmsort = "0.1.3"
image = { version = "0.21.1", optional = true }
gstreamer = { version = "0.14", optional = true }
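
The "collections" feature enabled above is what gives bumpalo its arena-backed String and Vec types; the src/files.rs changes below use them indirectly through the patch's own crate::alloc::Allocator wrapper, which is not shown in this diff. A minimal sketch of the upstream bumpalo API being assumed (the demo function and literal strings are placeholders):

    use bumpalo::Bump;
    use bumpalo::collections::String as BumpString;
    use bumpalo::collections::Vec as BumpVec;

    fn demo() {
        let bump = Bump::new();
        // Allocations land in the arena and are freed all at once when `bump` drops,
        // which avoids per-file heap traffic while reading a large directory.
        let mut name = BumpString::with_capacity_in(16, &bump);
        name.push_str("file.txt");
        let mut names = BumpVec::new_in(&bump);
        names.push(name);
    }
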
diff --git a/src/fail.rs b/src/fail.rs
index 519de19..c3b466a 100644
--- a/src/fail.rs
+++ b/src/fail.rs
@@ -144,7 +144,7 @@ impl HError {
}
pub fn preview_failed<T>(file: &crate::files::File) -> HResult<T> {
let name = file.name.clone();
- Err(HError::PreviewFailed{ file: name })
+ Err(HError::PreviewFailed{ file: name.to_string() })
}
diff --git a/src/file_browser.rs b/src/file_browser.rs
index f3cec19..e464028 100644
--- a/src/file_browser.rs
+++ b/src/file_browser.rs
@@ -673,6 +673,7 @@ impl FileBrowser {
}
pub fn update_preview(&mut self) -> HResult<()> {
+ return Ok(());
if !self.main_async_widget_mut()?.ready() { return Ok(()) }
if self.main_widget()?
.content
@@ -904,7 +905,7 @@ impl FileBrowser {
}
pub fn quit_with_dir(&self) -> HResult<()> {
- let cwd = self.cwd()?.clone().path;
+ let cwd = self.cwd()?.clone().path.clone();
let selected_file = self.selected_file()?;
let selected_file = selected_file.path.to_string_lossy();
let selected_files = self.selected_files()?;
@@ -1382,11 +1383,11 @@ impl FileBrowser {
let count_xpos = xsize - file_count.len() as u16;
let count_ypos = ypos + self.get_coordinates()?.ysize();
- let fs = self.fs_stat.read()?.find_fs(&file.path)?.clone();
+ //let fs = self.fs_stat.read()?.find_fs(&file.path)?.clone();
- let dev = fs.get_dev().unwrap_or(String::from(""));
- let free_space = fs.get_free();
- let total_space = fs.get_total();
+ let dev = String::from("");
+ let free_space = 0;
+ let total_space = 0;
let space = format!("{}{} / {}",
dev,
free_space,
@@ -1485,7 +1486,7 @@ impl Widget for FileBrowser {
fn refresh(&mut self) -> HResult<()> {
self.set_title().log();
self.columns.refresh().log();
- self.set_left_selection().log();
+ //self.set_left_selection().log();
self.set_cwd().log();
if !self.columns.zoom_active { self.update_preview().log(); }
self.columns.refresh().log();
diff --git a/src/files.rs b/src/files.rs
index f78f067..09172cd 100644
--- a/src/files.rs
+++ b/src/files.rs
@@ -1,15 +1,19 @@
+#![feature(vec_into_raw_parts)]
+
use std::cmp::Ord;
use std::collections::{HashMap, HashSet};
use std::ops::Index;
use std::fs::Metadata;
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Mutex};
use std::sync::mpsc::Sender;
use std::hash::{Hash, Hasher};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::ffi::OsStr;
+use std::cell::RefCell;
+
use failure;
use failure::Fail;
@@ -31,6 +35,7 @@ use nix::{dir::*,
use pathbuftools::PathBufTools;
use async_value::{Async, Stale, StopIter};
+use bumpalo::Bump;
use crate::fail::{HResult, HError, ErrorLog};
use crate::dirty::{DirtyBit, Dirtyable};
@@ -44,6 +49,7 @@ lazy_static! {
static ref ICONS: Icons = Icons::new();
static ref IOTICK_CLIENTS: AtomicUsize = AtomicUsize::default();
static ref IOTICK: AtomicUsize = AtomicUsize::default();
+ static ref ALLOC: std::sync::Mutex<Bump> = std::sync::Mutex::new(Bump::new());
}
pub fn tick_str() -> &'static str {
@@ -59,8 +65,7 @@ pub fn tick_str() -> &'static str {
pub fn start_ticking(sender: Sender<Events>) {
use std::time::Duration;
- IOTICK_CLIENTS.fetch_add(1, Ordering::Relaxed);
- if IOTICK_CLIENTS.load(Ordering::Relaxed) == 1 {
+    if IOTICK_CLIENTS.fetch_add(1, Ordering::Relaxed) == 0 {
std::thread::spawn(move || {
IOTICK.store(0, Ordering::Relaxed);
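
The replaced fetch_add followed by a separate load left a gap: two concurrent callers could both increment first and then both read 2, so neither would start the ticker thread. Checking the value returned by fetch_add (the previous count) closes that gap, which is presumably the point of this hunk. A minimal sketch of the pattern, with hypothetical names:

    use std::sync::atomic::{AtomicUsize, Ordering};

    static CLIENTS: AtomicUsize = AtomicUsize::new(0);

    /// Returns true for exactly one caller: the first registered client.
    fn register_client() -> bool {
        // fetch_add returns the *previous* value, so only the first caller sees 0.
        CLIENTS.fetch_add(1, Ordering::Relaxed) == 0
    }
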
@@ -153,10 +158,10 @@ pub fn import_tags() -> HResult<()> {
pub fn check_tag(path: &PathBuf) -> HResult<bool> {
tags_loaded()?;
- let tagged = TAGS.read()?.1.binary_search(path)
- .map_or_else(|_| false,
- |_| true);
- Ok(tagged)
+ // let tagged = TAGS.read()?.1.binary_search(path)
+ // .map_or_else(|_| false,
+ // |_| true);
+ Ok(false)
}
pub fn tags_loaded() -> HResult<()> {
@@ -206,7 +211,7 @@ impl RefreshPackage {
let mut new_files = Vec::with_capacity(event_count);
// Files that need rerendering to make all changes visible (size, etc.)
- let mut changed_files = HashSet::with_capacity(event_count);
+ //let mut changed_files = HashSet::with_capacity(event_count);
// Save deletions to delete them efficiently later
let mut deleted_files = HashSet::with_capacity(event_count);
@@ -229,16 +234,16 @@ impl RefreshPackage {
}
Change(file) => {
if let Some(&fpos) = file_pos_map.get(&file) {
- let job = files.files[fpos].refresh_meta_job();
- jobs.push(job);
- changed_files.insert(file);
+ // let job = files[fpos].refresh_meta_job();
+ // jobs.push(job);
+ // changed_files.insert(file);
}
}
Rename(old, new) => {
if let Some(&fpos) = file_pos_map.get(&old) {
- files.files[fpos].rename(&new.path).log();
- let job = files.files[fpos].refresh_meta_job();
- jobs.push(job);
+ // files[fpos].rename(&new.path).log();
+ // let job = files[fpos].refresh_meta_job();
+ // jobs.push(job);
}
}
Remove(file) => {
@@ -258,9 +263,9 @@ impl RefreshPackage {
}
}
- if deleted_files.len() > 0 {
- files.files.retain(|file| !deleted_files.contains(file));
- }
+ // if deleted_files.len() > 0 {
+ // files.files.retain(|file| !deleted_files.contains(file));
+ // }
// Finally add all new files
files.files.extend(new_files);
@@ -275,12 +280,12 @@ impl RefreshPackage {
(std::mem::take(&mut files.files), files.len)
} else {
let placeholder = File::new_placeholder(&files.directory.path).unwrap();
- files.files.push(placeholder);
+ files.files.insert(placeholder);
(std::mem::take(&mut files.files), 1)
};
RefreshPackage {
- new_files: Some(files),
+ new_files: Some(files.into_iter().collect()),
new_len: new_len,
jobs: jobs
}
@@ -292,11 +297,16 @@ pub type Job = (PathBuf,
Option<Arc<RwLock<Option<Metadata>>>>,
Option<Arc<(AtomicBool, AtomicUsize)>>);
+// unsafe impl Sync for Files {}
+// unsafe impl Send for Files {}
+// unsafe impl Sync for File {}
+// unsafe impl Send for File {}
+
#[derive(Derivative)]
#[derivative(PartialEq, Eq, Hash, Clone, Debug)]
pub struct Files {
pub directory: File,
- pub files: Vec<File>,
+ pub files: SplaySet<File>,
pub len: usize,
#[derivative(Debug="ignore")]
#[derivative(PartialEq="ignore")]
@@ -325,13 +335,17 @@ pub struct Files {
#[derivative(Debug="ignore")]
#[derivative(PartialEq="ignore")]
#[derivative(Hash="ignore")]
- pub stale: Option<Stale>
+ pub stale: Option<Stale>,
+ #[derivative(Debug="ignore")]
+ #[derivative(PartialEq="ignore")]
+ #[derivative(Hash="ignore")]
+ alloc: Option<Allocator>
}
impl Index<usize> for Files {
type Output = File;
fn index(&self, pos: usize) -> &File {
- &self.files[pos]
+ &self.files.as_vec_like().get(pos).unwrap()
}
}
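
Files::files changes here from Vec<File> to SplaySet<File>. Note that Cargo.toml points splay_tree at a local fork (path = "../splay_tree"), so methods used later such as as_vec_like() and better_iter_from() may not exist upstream. A rough sketch of the published splay_tree set interface this builds on, assuming the usual 0.2 API (demo and the file names are placeholders):

    use splay_tree::SplaySet;

    fn demo() {
        let mut files: SplaySet<String> = SplaySet::new();
        files.insert("b.txt".to_string());
        files.insert("a.txt".to_string());
        // Iteration yields elements in Ord order, so the set stays sorted as
        // entries are inserted and a separate sort pass becomes unnecessary.
        for name in files.iter() {
            println!("{}", name);
        }
        // Lookups splay the accessed key towards the root, so repeated access is cheap.
        assert!(files.contains("a.txt"));
        files.remove("b.txt");
        assert_eq!(files.len(), 1);
    }
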
@@ -356,7 +370,7 @@ impl Default for Files {
fn default() -> Files {
Files {
directory: File::new_placeholder(Path::new("")).unwrap(),
- files: vec![],
+ files: SplaySet::new(),
len: 0,
pending_events: Arc::new(RwLock::new(vec![])),
refresh: None,
@@ -370,7 +384,8 @@ impl Default for Files {
dirty: DirtyBit::new(),
jobs: vec![],
cache: None,
- stale: None
+ stale: None,
+ alloc: None
}
}
}
@@ -396,6 +411,37 @@ pub struct linux_dirent {
pub d_name: [u8; 0],
}
+impl std::cmp::PartialOrd for File {
+ fn partial_cmp(&self, other: &File) -> Option<std::cmp::Ordering> {
+ Some(self.name.cmp(&other.name))
+ }
+}
+
+impl std::cmp::Ord for File {
+ fn cmp(&self, other: &File) -> std::cmp::Ordering {
+ use std::cmp::Ordering::*;
+ let a = self;
+ let b = other;
+
+ let dircmp = move |a: &File, b: &File| {
+ match (a.is_dir(), b.is_dir()) {
+ (true, false) => Less,
+ (false, true) => Greater,
+ _ => Equal
+ }
+ };
+
+ let namecmp = move |a: &File, b: &File| {
+ compare(&a.name, &b.name)
+ };
+
+ match dircmp(a, b) {
+ Equal => namecmp(a, b),
+ ord @ _ => ord
+ }
+ }
+}
+
// This arcane spell hastens the target by around 30%.
@@ -406,7 +452,8 @@ pub struct linux_dirent {
// mostly looked up in man 2 getdents64, the nc crate, and the
// upcoming version of the walkdir crate, plus random examples here
// and there..
-
+use splay_tree::SplaySet;
+use crate::alloc::Allocator;
// This should probably be replaced with walkdir when it gets a proper
// release with the new additions. nc itself is already too high level
// to meet the performance target, unfortunately.
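
For reference, a stripped-down, single-threaded sketch of the getdents64 loop these comments describe, without the threading, bump allocation, and SplaySet plumbing added in the hunks below (Linux only; the function name, buffer size, and error handling are placeholders):

    use libc::SYS_getdents64;

    #[repr(C)]
    struct Dirent64 {
        d_ino: u64,
        d_off: i64,
        d_reclen: u16,
        d_type: u8,
        d_name: [u8; 0],
    }

    /// Collect raw file names from an already opened directory fd.
    fn read_names(fd: i32) -> Vec<Vec<u8>> {
        let mut buf = vec![0u8; 64 * 1024];
        let mut names = Vec::new();
        loop {
            // Returns bytes written into buf; 0 means done, <0 means error.
            let nread = unsafe {
                libc::syscall(SYS_getdents64, fd, buf.as_mut_ptr(), buf.len())
            };
            if nread <= 0 { break; }
            let mut bpos = 0usize;
            while bpos < nread as usize {
                let d = unsafe { &*(buf.as_ptr().add(bpos) as *const Dirent64) };
                // d_name is NULL terminated; strlen yields its true length.
                let len = unsafe { libc::strlen(d.d_name.as_ptr() as *const _) };
                let name = unsafe { std::slice::from_raw_parts(d.d_name.as_ptr(), len) };
                if name != b"." && name != b".." {
                    names.push(name.to_vec());
                }
                // Advance by the length of the current dirent.
                bpos += d.d_reclen as usize;
            }
        }
        names
    }
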
@@ -415,56 +462,65 @@ pub struct linux_dirent {
// report the kind of file in d_type. Currently that means calling
// stat on ALL files and throwing away the result. This is wasteful.
#[cfg(target_os = "linux")]
-pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<Vec<File>, FileError>
+pub fn from_getdents(fd: i32,
+ path: &Path,
+ nothidden: &AtomicUsize,
+ alloc: &Allocator) -> Result<SplaySet<File>, FileError>
{
use libc::SYS_getdents64;
// Buffer size was chosen after measuring different sizes and 4k seemed best
- const BUFFER_SIZE: usize = 1024 * 1024 * 4;
+ const BUFFER_SIZE: usize = 1024 * 1024 * 2 * 4;
+ const BUFFER_SUBSLICE_SIZE: usize = BUFFER_SIZE / 4;
// Abuse Vec<u8> as byte buffer
let mut buf: Vec<u8> = vec![0; BUFFER_SIZE];
- let bufptr = buf.as_mut_ptr();
-
- // Store all the converted (to File) entries in here
- let files = std::sync::Mutex::new(Vec::<File>::new());
- let files = &files;
- // State of the getdents loop
- enum DentStatus {
- More(Vec<File>),
- Skip,
- Done,
- Err(FileError)
- }
+ let path_ostr = path.as_os_str();
+ let path_len = path_ostr.len();
+ let files: Mutex<SplaySet<File>> = Mutex::new(SplaySet::new());
let result = crossbeam::scope(|s| {
+ let files = &files;
+
loop {
+ // let (buf, localfiles, used) = get_buffers();
+ let bufptr = buf.as_mut_ptr() as usize;
+ // let mut localfiles: &mut Vec<_> = localfiles;
+
+ //dbg!(&bufptr);
+
// Returns number of bytes written to buffer
- let nread = unsafe { libc::syscall(SYS_getdents64, fd, bufptr, BUFFER_SIZE) };
+ let nread = unsafe { libc::syscall(SYS_getdents64,
+ fd,
+ bufptr,
+ BUFFER_SUBSLICE_SIZE) };
// 0 means done, -1 means an error happened
- if nread == 0 {
+ if dbg!(nread) == 0 {
break;
} else if nread < 0 {
let pathstr = path.to_string_lossy().to_string();
HError::log::<()>(&format!("Couldn't read dents from: {}",
&pathstr)).ok();
- break;
+ return;
}
- // Clone buffer for parallel processing in another thread
- let mut buf: Vec<u8> = buf.clone();
- s.spawn(move |_| {
+ // // Clone buffer for parallel processing in another thread
+ let mut buf: Vec<u8> = Vec::from(&buf[..nread as usize + 1]);
+
+ let files = s.spawn(move |_| {
+ //let bump = std::sync::Mutex::new(Bump::new());
// Rough approximation of the number of entries. Actual
// size changes from entry to entry due to variable string
// size.
let cap = nread as usize / std::mem::size_of::<linux_dirent>();
// Use a local Vec to avoid contention on Mutex
- let mut localfiles = Vec::with_capacity(cap);
+ let mut localfiles = Vec::with_capacity(dbg!(cap));
let bufptr = buf.as_mut_ptr() as usize;
+
let mut bpos: usize = 0;
while bpos < nread as usize {
@@ -488,16 +544,17 @@ pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<V
// OOB!!!
if bpos + name_len > BUFFER_SIZE {
+ panic!("LOLWUT");
HError::log::<()>(&format!("WARNING: Name for file was out of bounds in: {}",
path.to_string_lossy())).ok();
- return DentStatus::Err(FileError::GetDents(path.to_string_lossy().to_string()));
+ return Err(FileError::GetDents(path.to_string_lossy().to_string()));
}
// Add length of current dirent to the current offset
// tbuffer[n] -> buffer[n + len(buffer[n])
bpos = bpos + d.d_reclen as usize;
- let name: &OsStr = {
+ let (name_ostr, name_bytes, name_len): (&OsStr, &[u8], usize) = {
// Safe as long as d_name is NULL terminated
let true_len = unsafe { libc::strlen(d.d_name.as_ptr() as *const i8) };
// Safe if strlen returned without SEGFAULT on OOB (if d_name weren't NULL terminated)
@@ -505,12 +562,14 @@ pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<V
true_len) };
// Don't want this
- if bytes.len() == 0 || bytes == b"." || bytes == b".." {
+ if true_len == 0 || bytes == b"." || bytes == b".." {
continue;
}
// A bit sketchy maybe, but if all checks passed, should be OK.
- unsafe { std::mem::transmute::<&[u8], &OsStr>(bytes) }
+ (unsafe { std::mem::transmute::<&[u8], &OsStr>(bytes) },
+ bytes,
+ true_len)
};
// Avoid reallocation on push
@@ -537,7 +596,7 @@ pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<V
let stat =
match fstatat(fd, &path, flags) {
Ok(stat) => stat,
- Err(_) => return DentStatus::Err(FileError::GetDents(path.to_string_lossy()
+ Err(_) => return Err(FileError::GetDents(path.to_string_lossy()
.to_string()))
};
@@ -562,11 +621,26 @@ pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<V
_ => (Kind::File, None)
};
- let name = name.to_str()
- .map(|n| String::from(n))
- .unwrap_or_else(|| name.to_string_lossy().to_string());
+ // Avoid reallocation on push
+ let path = {
+ let pathlen = path_len;
+ let totallen = pathlen + name_len + 2;
+
+ let mut path = alloc.pathbuf(totallen);
+ path.write(path_ostr.as_bytes());
+ path.write(&[b'/']);
+ path.write(name_bytes);
+ path.finalize_pathbuf()
+ };
- let hidden = name.as_bytes()[0] == b'.';
+ let name: String = {
+ let mut string = alloc.string(name_len);
+ string.write(name_bytes);
+ string.finalize_string()
+ };
+
+
+ let hidden = name_bytes[0] == b'.';
if !hidden {
nothidden.fetch_add(1, Ordering::Relaxed);
@@ -585,19 +659,33 @@ pub fn from_getdents(fd: i32, path: &Path, nothidden: &AtomicUsize) -> Result<V
tag: None,
};
- // Push into local Vec
- localfiles.push(file);
+ localfiles.push(file)
}
- // Successfully looped over all dirents. Now append everything at once
- files.lock().unwrap().append(&mut localfiles);
- DentStatus::Done
+ // Now add all files from this worker thread
+ files.lock().map(|mut f| {
+ f.extend(localfiles);
+ }).unwrap();
+
+ Ok(())
});
}
});
+
+ dbg!(files.lock().unwrap().len());
+
match result {
- Ok(()) => Ok(std::mem::take(&mut *files.lock().unwrap())),
+ Ok(()) => {
+ // let len = workset.iter().map(|(_, f, _)| f.len()).sum();
+ // let mut files = Vec::with_capacity(len);
+
+ // for i in 0..4 {
+ // files.par_extend(std::mem::take(&mut workset[i].1));
+ // }
+
+ return Ok(std::mem::take(&mut *files.lock().unwrap()))
+ }
Err(_) => Err(FileError::GetDents(path.to_string_lossy().to_string()))
}
}
@@ -617,8 +705,15 @@ impl Files {
Mode::empty())
.map_err(|e| FileError::OpenDir(e))?;
- let direntries = from_getdents(dir.as_raw_fd(), path, &nonhidden)?;
+ use std::time::Instant;
+ let alloc = Allocator::new();
+ let now = Instant::now();
+ let direntries = from_getdents(dir.as_raw_fd(),
+ path,
+ &nonhidden,
+ &alloc)?;
+ dbg!("READING: ", now.elapsed().as_millis());
if stale.is_stale()? {
HError::stale()?;
}
@@ -626,10 +721,10 @@ impl Files {
let mut files = Files::default();
files.directory = File::new_from_path(&path)?;
-
files.files = direntries;
files.len = nonhidden.load(Ordering::Relaxed);
files.stale = Some(stale);
+ files.alloc = Some(alloc);
Ok(files)
}
@@ -676,6 +771,7 @@ impl Files {
}
pub fn enqueue_jobs(&mut self, n: usize) {
+ return;
let from = self.meta_upto.unwrap_or(0);
self.meta_upto = Some(from + n);
@@ -684,18 +780,19 @@ impl Files {
None => return
};
- let mut jobs = self.iter_files_mut()
- .collect::<Vec<&mut File>>()
- .into_par_iter()
- .skip(from)
- .take(n)
- .filter_map(|f| f.prepare_meta_job(&cache))
- .collect::<Vec<_>>();
+ // let mut jobs = self.iter_files_mut()
+ // .collect::<Vec<&mut File>>()
+ // .into_par_iter()
+ // .skip(from)
+ // .take(n)
+ // .filter_map(|f| f.prepare_meta_job(&cache))
+ // .collect::<Vec<_>>();
- self.jobs.append(&mut jobs);
+ // self.jobs.append(&mut jobs);
}
pub fn run_jobs(&mut self, sender: Sender<Events>) {
+ return;
let jobs = std::mem::take(&mut self.jobs);
let stale = self.stale
.clone()
@@ -741,21 +838,23 @@ impl Files {
}
pub fn recalculate_len(&mut self) {
- self.len = self.par_iter_files().count();
+ self.len = self.iter_files().count();
}
pub fn get_file_mut(&mut self, index: usize) -> Option<&mut File> {
// Need actual length of self.files for this
let hidden_in_between = self.files_in_between(index, self.files.len());
- self.files.get_mut(index + hidden_in_between)
+ let file = self.files.as_vec_like_mut().get(index + hidden_in_between);
+ None
}
- pub fn par_iter_files(&self) -> impl ParallelIterator<Item=&File> {
+ pub fn par_iter_files(&self) -> impl Iterator<Item=&File> {
let filter_fn = self.filter_fn();
self.files
- .par_iter()
+ .as_vec_like()
+ .iter()
.filter(move |f| filter_fn(f))
}
@@ -763,32 +862,40 @@ impl Files {
let filter_fn = self.filter_fn();
self.files
+ .as_vec_like()
.iter()
.filter(move |&f| filter_fn(f))
}
+ #[allow(mutable_transmutes)]
pub fn files_in_between(&self, pos: usize, n_before: usize) -> usize {
let filter_fn = self.filter_fn();
- self.files[..pos].iter()
- .rev()
- .enumerate()
- .filter(|(_, f)| filter_fn(f))
- .take(n_before)
- .map(|(i, _)| i + 1)
- .last()
- .unwrap_or(0)
+ unsafe {std::mem::transmute(self.files
+ .iter()
+ .take(pos)
+ .collect::<Vec<_>>()
+ .iter()
+ .rev()
+ .enumerate()
+ .filter(|(_, f)| filter_fn(f))
+ .take(n_before)
+ .map(|(i, _)| i + 1)
+ .last()
+ .unwrap_or(0)) }
+
}
- pub fn iter_files_from(&self, from: &File, n_before: usize) -> impl Iterator<Item=&File> {
- let fpos = self.find_file(from).unwrap_or(0);
+ pub fn iter_files_from(&mut self, from: &File, n_before: usize) -> impl Iterator<Item=&File> {
+ // let fpos = self.find_file(from).unwrap_or(0);
- let files_in_between = self.files_in_between(fpos, n_before);
+ // let files_in_between = self.files_in_between(fpos, n_before);
let filter_fn = self.filter_fn();
- self.files[fpos.saturating_sub(files_in_between)..]
- .iter()
+ use splay_tree::set::SuperIter;
+ self.files
+ .better_iter_from(n_before)
.filter(move |f| filter_fn(f))
}
@@ -798,17 +905,35 @@ impl Files {
let filter_fn = self.filter_fn();
- self.files[fpos.saturating_sub(files_in_between)..]
- .iter_mut()
+ self.iter_files_mut()
+ .skip(fpos.saturating_sub(files_in_between))
.filter(move |f| filter_fn(f))
}
- pub fn iter_files_mut(&mut self) -> impl Iterator<Item=&mut File> {
+ #[allow(mutable_transmutes)]
+ pub fn iter_files_mut(&mut self) -> impl Iterator<Item=&mut File> {
let filter_fn = self.filter_fn();
-
self.files
- .iter_mut()
+ .iter()
.filter(move |f| filter_fn(f))
+ .map(|f| unsafe { std::mem::transmute(f) })
+ //let wut = &mut self;
+
+ // std::iter::from_fn(move || -> Option<&mut File> {
+ // // match wut.files.take_smallest() {
+ // // Some(f) => {
+ // // wut.taken.push(f);
+ // // wut.taken.last_mut()
+ // // }
+ // // None => {
+ // // for file in std::mem::take(&mut wut.taken).into_iter() {
+ // // wut.files.insert(file.clone());
+ // // }
+ // // None
+ // // }
+ // // }
+ // None
+ // })
}
#[allow(trivial_bounds)]
@@ -822,7 +947,7 @@ impl Files {
!(filter.is_some() &&
!f.name.contains(filter.as_ref().unwrap())) &&
(!filter_selected || f.selected) &&
- !(!show_hidden && f.name.starts_with("."))
+ !(!show_hidden && f.name.as_bytes()[0] == b'.')
}
}
@@ -916,10 +1041,15 @@ impl Files {
}
pub fn sort(&mut self) {
+ use std::time::Instant;
+
+ let now = Instant::now();
+
let sort = self.sorter();
- self.files
- .par_sort_unstable_by(sort);
+ // dmsort::sort_by(&mut self.files, sort);
+
+ dbg!("SORTING: ", now.elapsed().as_millis());
}
pub fn cycle_sort(&mut self) {
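
The dmsort call stays commented out here (a SplaySet is not a slice), but the new dependency hints at the plan: drop-merge sort is fastest on nearly-sorted input, such as re-sorting a listing after a handful of new entries appear. A hedged sketch, assuming the crate's sort_by(slice, comparator) entry point (resort is a hypothetical name):

    /// Re-sort a nearly-sorted list of names with drop-merge sort.
    fn resort(names: &mut [String]) {
        dmsort::sort_by(names, |a, b| a.cmp(b));
    }
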
@@ -952,7 +1082,7 @@ impl Files {
let dirpath = self.directory.path.clone();
self.find_file_with_path(&dirpath).cloned()
.map(|placeholder| {
- self.files.remove_item(&placeholder);
+ self.files.remove(&placeholder);
if self.len > 0 {
self.len -= 1;
}
@@ -960,27 +1090,28 @@ impl Files {
}
pub fn ready_to_refresh(&self) -> HResult<bool> {
- let pending = self.pending_events.read()?.len();
- let running = self.refresh.is_some();
- Ok(pending > 0 && !running)
+ // let pending = self.pending_events.read()?.len();
+ // let running = self.refresh.is_some();
+ // Ok(pending > 0 && !running)
+ Ok(false)
}
pub fn get_refresh(&mut self) -> HResult<Option<RefreshPackage>> {
- if let Some(mut refresh) = self.refresh.take() {
- if refresh.is_ready() {
- self.stale.as_ref().map(|s| s.set_fresh());
- refresh.pull_async()?;
- let mut refresh = refresh.value?;
- self.files = refresh.new_files.take()?;
- self.jobs.append(&mut refresh.jobs);
- if refresh.new_len != self.len() {
- self.len = refresh.new_len;
- }
- return Ok(Some(refresh));
- } else {
- self.refresh.replace(refresh);
- }
- }
+ // if let Some(mut refresh) = self.refresh.take() {
+ // if refresh.is_ready() {
+ // self.stale.as_ref().map(|s| s.set_fresh());
+ // refresh.pull_async()?;
+ // let mut refresh = refresh.value?;
+ // self.files = refresh.new_files.take()?;
+ // self.jobs.append(&mut refresh.jobs);
+ // if refresh.new_len != self.len() {
+ // self.len = refresh.new_len;
+ // }
+ // return Ok(Some(refresh));
+ // } else {