From 156893d774b4da5b541fdbb08428f9ec392949a0 Mon Sep 17 00:00:00 2001 From: Ellie Huxtable Date: Sun, 25 Apr 2021 18:21:52 +0100 Subject: Update docs, unify on SQLx, bugfixes (#40) * Begin moving to sqlx for local too * Stupid scanners should just have a nice cup of tea Random internet shit searching for /.env or whatever * Remove diesel and rusqlite fully --- .github/workflows/rust.yml | 2 +- Cargo.lock | 42 +-- Cargo.toml | 5 +- Dockerfile | 2 +- README.md | 155 ++------- atuin-client/Cargo.toml | 2 +- .../migrations/20210422143411_create_history.sql | 16 + atuin-client/src/database.rs | 355 ++++++++------------- atuin-client/src/encryption.rs | 2 +- atuin-client/src/history.rs | 2 +- atuin-client/src/settings.rs | 46 +-- atuin-client/src/sync.rs | 10 +- atuin-common/src/utils.rs | 39 +++ .../migrations/20210425153745_create_history.sql | 11 + .../migrations/20210425153757_create_users.sql | 10 + .../migrations/20210425153800_create_sessions.sql | 6 + atuin-server/src/database.rs | 2 + atuin-server/src/router.rs | 7 +- atuin-server/src/settings.rs | 5 +- docs/config.md | 99 ++++++ docs/import.md | 27 ++ docs/list.md | 11 + docs/search.md | 39 +++ docs/stats.md | 36 +++ docs/sync.md | 55 ++++ install.sh | 1 + migrations/.gitkeep | 0 .../00000000000000_diesel_initial_setup/down.sql | 6 - .../00000000000000_diesel_initial_setup/up.sql | 36 --- .../2021-03-20-151809_create_history/down.sql | 2 - migrations/2021-03-20-151809_create_history/up.sql | 13 - migrations/2021-03-20-171007_create_users/down.sql | 2 - migrations/2021-03-20-171007_create_users/up.sql | 11 - .../2021-03-21-181750_create_sessions/down.sql | 2 - .../2021-03-21-181750_create_sessions/up.sql | 6 - src/command/event.rs | 8 +- src/command/history.rs | 112 ++++--- src/command/import.rs | 12 +- src/command/mod.rs | 42 ++- src/command/search.rs | 252 ++++++++++----- src/command/stats.rs | 10 +- src/command/sync.rs | 8 +- 42 files changed, 872 insertions(+), 637 deletions(-) create mode 100644 atuin-client/migrations/20210422143411_create_history.sql create mode 100644 atuin-server/migrations/20210425153745_create_history.sql create mode 100644 atuin-server/migrations/20210425153757_create_users.sql create mode 100644 atuin-server/migrations/20210425153800_create_sessions.sql create mode 100644 docs/config.md create mode 100644 docs/import.md create mode 100644 docs/list.md create mode 100644 docs/search.md create mode 100644 docs/stats.md create mode 100644 docs/sync.md create mode 100644 install.sh delete mode 100644 migrations/.gitkeep delete mode 100644 migrations/00000000000000_diesel_initial_setup/down.sql delete mode 100644 migrations/00000000000000_diesel_initial_setup/up.sql delete mode 100644 migrations/2021-03-20-151809_create_history/down.sql delete mode 100644 migrations/2021-03-20-151809_create_history/up.sql delete mode 100644 migrations/2021-03-20-171007_create_users/down.sql delete mode 100644 migrations/2021-03-20-171007_create_users/up.sql delete mode 100644 migrations/2021-03-21-181750_create_sessions/down.sql delete mode 100644 migrations/2021-03-21-181750_create_sessions/up.sql diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1a8ac289..54bbbb4f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -38,7 +38,7 @@ jobs: override: true - name: Run cargo test - run: cargo test + run: cargo test --workspace clippy: runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 8249cd4c..f9f2252f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,6 +92,7 @@ 
dependencies = [ "chrono", "chrono-english", "cli-table", + "crossbeam-channel", "directories", "eyre", "fork", @@ -100,11 +101,11 @@ dependencies = [ "itertools", "log", "pretty_env_logger", - "rusqlite", "serde 1.0.125", "serde_derive", "serde_json", "structopt", + "tabwriter", "termion", "tokio", "tui", @@ -132,13 +133,13 @@ dependencies = [ "rand 0.8.3", "reqwest", "rmp-serde", - "rusqlite", "rust-crypto", "serde 1.0.125", "serde_derive", "serde_json", "shellexpand", "sodiumoxide", + "sqlx", "tokio", "urlencoding", "uuid", @@ -606,18 +607,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fallible-iterator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" - -[[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" - [[package]] name = "fern" version = "0.6.0" @@ -1817,21 +1806,6 @@ dependencies = [ "serde 1.0.125", ] -[[package]] -name = "rusqlite" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc783b7ddae608338003bac1fa00b6786a75a9675fbd8e87243ecfdea3f6ed2" -dependencies = [ - "bitflags", - "fallible-iterator", - "fallible-streaming-iterator", - "hashlink", - "libsqlite3-sys", - "memchr", - "smallvec", -] - [[package]] name = "rust-argon2" version = "0.8.3" @@ -2116,6 +2090,7 @@ dependencies = [ "hmac", "itoa", "libc", + "libsqlite3-sys", "log", "md-5", "memchr", @@ -2234,6 +2209,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tabwriter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36205cfc997faadcc4b0b87aaef3fbedafe20d38d4959a7ca6ff803564051111" +dependencies = [ + "unicode-width", +] + [[package]] name = "tap" version = "1.0.1" diff --git a/Cargo.toml b/Cargo.toml index 16d4c655..88a85823 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,6 @@ chrono-english = "0.1.4" cli-table = "0.4" base64 = "0.13.0" humantime = "2.1.0" +tabwriter = "1.2.1" +crossbeam-channel = "0.5.1" -[dependencies.rusqlite] -version = "0.25" -features = ["bundled"] diff --git a/Dockerfile b/Dockerfile index 4f0d615b..e90a2e5d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,7 +21,7 @@ FROM debian:buster-slim as runtime WORKDIR app ENV TZ=Etc/UTC -ENV RUST_LOG=info +ENV RUST_LOG=atuin::api=info ENV ATUIN_CONFIG_DIR=/config COPY --from=builder /app/target/release/atuin /usr/local/bin diff --git a/README.md b/README.md index 76289fa9..1533e3c8 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,7 @@

-  A'Tuin
+  Atuin

-
-  Through the fathomless deeps of space swims the star turtle Great A’Tuin, bearing on its back the four giant elephants who carry on their shoulders the mass of the Discworld.
-
+
+  Magical shell history

@@ -12,28 +10,42 @@

-A'Tuin manages and synchronizes your shell history! Instead of storing -everything in a text file (such as ~/.history), A'Tuin uses a sqlite database. -While being a little more complex, this allows for more functionality. - -As well as the expected command, A'Tuin stores - -- duration -- exit code -- working directory -- hostname -- time -- a unique session ID +- store shell history in a sqlite database +- back up e2e encrypted history to the cloud, and synchronize between machines +- log exit code, cwd, hostname, session, command duration, etc +- smart interactive history search to replace ctrl-r +- calculate statistics such as "most used command" +- old history file is not replaced + +## Documentation + +- [Quickstart](#quickstart) +- [Install](#install) +- [Import](docs/import.md) +- [Configuration](docs/config.md) +- [Searching history](docs/search.md) +- [Cloud history sync](docs/sync.md) +- [History stats](docs/stats.md) ## Supported Shells - zsh +# Quickstart + +``` +curl https://github.com/ellie/atuin/blob/main/install.sh | bash + +atuin register -u -e -p +atuin import auto +atuin sync +``` + ## Install ### AUR -A'Tuin is available on the [AUR](https://aur.archlinux.org/packages/atuin/) +Atuin is available on the [AUR](https://aur.archlinux.org/packages/atuin/) ``` yay -S atuin # or your AUR helper of choice @@ -41,19 +53,16 @@ yay -S atuin # or your AUR helper of choice ### With cargo -`atuin` needs a nightly version of Rust + Cargo! It's best to use -[rustup](https://rustup.rs/) for getting set up there. +It's best to use [rustup](https://rustup.rs/) to get setup with a Rust +toolchain, then you can run: ``` -rustup default nightly - cargo install atuin ``` ### From source ``` -rustup default nightly git clone https://github.com/ellie/atuin.git cd atuin cargo install --path . @@ -67,107 +76,9 @@ Once the binary is installed, the shell plugin requires installing. Add eval "$(atuin init)" ``` -to your `.zshrc`/`.bashrc`/whatever your shell uses. - -## Usage - -### History search - -By default A'Tuin will rebind ctrl-r and the up arrow to search your history. - -You can prevent this by putting - -``` -export ATUIN_BINDKEYS="false" -``` - -into your shell config. 
- -### Import history - -``` -atuin import auto # detect shell, then import - -or - -atuin import zsh # specify shell -``` - -### List history - -List all history - -``` -atuin history list -``` - -List history for the current directory - -``` -atuin history list --cwd - -atuin h l -c # alternative, shorter version -``` - -List history for the current session - -``` -atuin history list --session - -atuin h l -s # similarly short -``` - -### Stats - -A'Tuin can calculate statistics for a single day, and accepts "natural language" style date input, as well as absolute dates: - -``` -$ atuin stats day last friday - -+---------------------+------------+ -| Statistic | Value | -+---------------------+------------+ -| Most used command | git status | -+---------------------+------------+ -| Commands ran | 450 | -+---------------------+------------+ -| Unique commands ran | 213 | -+---------------------+------------+ - -$ atuin stats day 01/01/21 # also accepts absolute dates -``` - -It can also calculate statistics for all of known history: - -``` -$ atuin stats all - -+---------------------+-------+ -| Statistic | Value | -+---------------------+-------+ -| Most used command | ls | -+---------------------+-------+ -| Commands ran | 8190 | -+---------------------+-------+ -| Unique commands ran | 2996 | -+---------------------+-------+ -``` - -## Config - -A'Tuin is configurable via TOML. The file lives at ` ~/.config/atuin/config.toml`, -and looks like this: - -``` -[local] -dialect = "uk" # or us. sets the date format used by stats -server_address = "https://atuin.elliehuxtable.com/" # the server to sync with - -[local.db] -path = "~/.local/share/atuin/history.db" # the local database for history -``` +to your `.zshrc` ## ...what's with the name? -A'Tuin is named after "The Great A'Tuin", a giant turtle from Terry Pratchett's +Atuin is named after "The Great A'Tuin", a giant turtle from Terry Pratchett's Discworld series of books. 
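The client-side diffs that follow replace rusqlite's blocking `Connection` with an async sqlx `SqlitePool` behind an `#[async_trait]` trait. A rough, self-contained sketch of that pattern, separate from the patch itself (the `Store` trait, `SqliteStore`, `CommandRow`, and the `commands` table are illustrative stand-ins, not names from this change):

```
use std::str::FromStr;

use async_trait::async_trait;
use eyre::Result;
use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions};

// Illustrative row type; FromRow lets query_as map columns onto fields by name.
#[derive(Debug, sqlx::FromRow)]
struct CommandRow {
    id: String,
    command: String,
}

// async fn in traits is not supported natively here, so async_trait bridges the gap.
#[async_trait]
trait Store {
    async fn search(&self, prefix: &str) -> Result<Vec<CommandRow>>;
}

struct SqliteStore {
    pool: SqlitePool,
}

impl SqliteStore {
    async fn open(path: &str) -> Result<Self> {
        // Same connection options the patch uses: WAL journaling, create on first run.
        let opts = SqliteConnectOptions::from_str(path)?
            .journal_mode(SqliteJournalMode::Wal)
            .create_if_missing(true);
        let pool = SqlitePoolOptions::new().connect_with(opts).await?;
        Ok(Self { pool })
    }
}

#[async_trait]
impl Store for SqliteStore {
    async fn search(&self, prefix: &str) -> Result<Vec<CommandRow>> {
        // Bound parameters instead of string interpolation; the pool is awaited directly.
        let rows = sqlx::query_as::<_, CommandRow>(
            "select id, command from commands where command like ?1 || '%'",
        )
        .bind(prefix)
        .fetch_all(&self.pool)
        .await?;
        Ok(rows)
    }
}
```

The patch applies the same shape to every `Database` method: each rusqlite prepare/query pair becomes a `sqlx::query_as` call that is bound, awaited, and mapped straight into `History` via `#[derive(sqlx::FromRow)]`.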
diff --git a/atuin-client/Cargo.toml b/atuin-client/Cargo.toml index 4d3e9130..bd09ca42 100644 --- a/atuin-client/Cargo.toml +++ b/atuin-client/Cargo.toml @@ -37,6 +37,6 @@ tokio = { version = "1", features = ["full"] } async-trait = "0.1.49" urlencoding = "1.1.1" humantime = "2.1.0" -rusqlite= { version = "0.25", features = ["bundled"] } itertools = "0.10.0" shellexpand = "2" +sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "uuid", "chrono", "sqlite" ] } diff --git a/atuin-client/migrations/20210422143411_create_history.sql b/atuin-client/migrations/20210422143411_create_history.sql new file mode 100644 index 00000000..23c63a4f --- /dev/null +++ b/atuin-client/migrations/20210422143411_create_history.sql @@ -0,0 +1,16 @@ +-- Add migration script here +create table if not exists history ( + id text primary key, + timestamp text not null, + duration integer not null, + exit integer not null, + command text not null, + cwd text not null, + session text not null, + hostname text not null, + + unique(timestamp, cwd, command) +); + +create index if not exists idx_history_timestamp on history(timestamp); +create index if not exists idx_history_command on history(command); diff --git a/atuin-client/src/database.rs b/atuin-client/src/database.rs index 0855359b..754a0ecf 100644 --- a/atuin-client/src/database.rs +++ b/atuin-client/src/database.rs @@ -1,44 +1,48 @@ -use chrono::prelude::*; -use chrono::Utc; use std::path::Path; +use std::str::FromStr; + +use async_trait::async_trait; +use chrono::Utc; -use eyre::{eyre, Result}; +use eyre::Result; -use rusqlite::{params, Connection}; -use rusqlite::{Params, Transaction}; +use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions}; use super::history::History; +#[async_trait] pub trait Database { - fn save(&mut self, h: &History) -> Result<()>; - fn save_bulk(&mut self, h: &[History]) -> Result<()>; + async fn save(&mut self, h: &History) -> Result<()>; + async fn save_bulk(&mut self, h: &[History]) -> Result<()>; - fn load(&self, id: &str) -> Result; - fn list(&self, max: Option, unique: bool) -> Result>; - fn range(&self, from: chrono::DateTime, to: chrono::DateTime) - -> Result>; + async fn load(&self, id: &str) -> Result; + async fn list(&self, max: Option, unique: bool) -> Result>; + async fn range( + &self, + from: chrono::DateTime, + to: chrono::DateTime, + ) -> Result>; - fn query(&self, query: &str, params: impl Params) -> Result>; - fn update(&self, h: &History) -> Result<()>; - fn history_count(&self) -> Result; + async fn update(&self, h: &History) -> Result<()>; + async fn history_count(&self) -> Result; - fn first(&self) -> Result; - fn last(&self) -> Result; - fn before(&self, timestamp: chrono::DateTime, count: i64) -> Result>; + async fn first(&self) -> Result; + async fn last(&self) -> Result; + async fn before(&self, timestamp: chrono::DateTime, count: i64) -> Result>; - fn prefix_search(&self, query: &str) -> Result>; + async fn search(&self, limit: Option, query: &str) -> Result>; - fn search(&self, cwd: Option, exit: Option, query: &str) -> Result>; + async fn query_history(&self, query: &str) -> Result>; } // Intended for use on a developer machine and not a sync server. 
// TODO: implement IntoIterator pub struct Sqlite { - conn: Connection, + pool: SqlitePool, } impl Sqlite { - pub fn new(path: impl AsRef) -> Result { + pub async fn new(path: impl AsRef) -> Result { let path = path.as_ref(); debug!("opening sqlite database at {:?}", path); @@ -49,137 +53,106 @@ impl Sqlite { } } - let conn = Connection::open(path)?; + let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())? + .journal_mode(SqliteJournalMode::Wal) + .create_if_missing(true); + + let pool = SqlitePoolOptions::new().connect_with(opts).await?; - Self::setup_db(&conn)?; + Self::setup_db(&pool).await?; - Ok(Self { conn }) + Ok(Self { pool }) } - fn setup_db(conn: &Connection) -> Result<()> { + async fn setup_db(pool: &SqlitePool) -> Result<()> { debug!("running sqlite database setup"); - conn.execute( - "create table if not exists history ( - id text primary key, - timestamp integer not null, - duration integer not null, - exit integer not null, - command text not null, - cwd text not null, - session text not null, - hostname text not null, - - unique(timestamp, cwd, command) - )", - [], - )?; - - conn.execute( - "create table if not exists history_encrypted ( - id text primary key, - data blob not null - )", - [], - )?; - - conn.execute( - "create index if not exists idx_history_timestamp on history(timestamp)", - [], - )?; - - conn.execute( - "create index if not exists idx_history_command on history(command)", - [], - )?; + sqlx::migrate!("./migrations").run(pool).await?; Ok(()) } - fn save_raw(tx: &Transaction, h: &History) -> Result<()> { - tx.execute( - "insert or ignore into history ( - id, - timestamp, - duration, - exit, - command, - cwd, - session, - hostname - ) values (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", - params![ - h.id, - h.timestamp.timestamp_nanos(), - h.duration, - h.exit, - h.command, - h.cwd, - h.session, - h.hostname - ], - )?; + async fn save_raw(tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, h: &History) -> Result<()> { + sqlx::query( + "insert or ignore into history(id, timestamp, duration, exit, command, cwd, session, hostname) + values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", + ) + .bind(h.id.as_str()) + .bind(h.timestamp.to_rfc3339()) + .bind(h.duration) + .bind(h.exit) + .bind(h.command.as_str()) + .bind(h.cwd.as_str()) + .bind(h.session.as_str()) + .bind(h.hostname.as_str()) + .execute(tx) + .await?; Ok(()) } } +#[async_trait] impl Database for Sqlite { - fn save(&mut self, h: &History) -> Result<()> { + async fn save(&mut self, h: &History) -> Result<()> { debug!("saving history to sqlite"); - let tx = self.conn.transaction()?; - Self::save_raw(&tx, h)?; - tx.commit()?; + let mut tx = self.pool.begin().await?; + Self::save_raw(&mut tx, h).await?; + tx.commit().await?; Ok(()) } - fn save_bulk(&mut self, h: &[History]) -> Result<()> { + async fn save_bulk(&mut self, h: &[History]) -> Result<()> { debug!("saving history to sqlite"); - let tx = self.conn.transaction()?; + let mut tx = self.pool.begin().await?; + for i in h { - Self::save_raw(&tx, i)? + Self::save_raw(&mut tx, i).await? 
} - tx.commit()?; + + tx.commit().await?; Ok(()) } - fn load(&self, id: &str) -> Result { + async fn load(&self, id: &str) -> Result { debug!("loading history item {}", id); - let history = self.query( - "select id, timestamp, duration, exit, command, cwd, session, hostname from history - where id = ?1 limit 1", - &[id], - )?; + let res = sqlx::query_as::<_, History>("select * from history where id = ?1") + .bind(id) + .fetch_one(&self.pool) + .await?; - if history.is_empty() { - return Err(eyre!("could not find history with id {}", id)); - } - - let history = history[0].clone(); - - Ok(history) + Ok(res) } - fn update(&self, h: &History) -> Result<()> { + async fn update(&self, h: &History) -> Result<()> { debug!("updating sqlite history"); - self.conn.execute( + sqlx::query( "update history set timestamp = ?2, duration = ?3, exit = ?4, command = ?5, cwd = ?6, session = ?7, hostname = ?8 where id = ?1", - params![h.id, h.timestamp.timestamp_nanos(), h.duration, h.exit, h.command, h.cwd, h.session, h.hostname], - )?; + ) + .bind(h.id.as_str()) + .bind(h.timestamp.to_rfc3339()) + .bind(h.duration) + .bind(h.exit) + .bind(h.command.as_str()) + .bind(h.cwd.as_str()) + .bind(h.session.as_str()) + .bind(h.hostname.as_str()) + .execute(&self.pool) + .await?; Ok(()) } // make a unique list, that only shows the *newest* version of things - fn list(&self, max: Option, unique: bool) -> Result> { + async fn list(&self, max: Option, unique: bool) -> Result> { debug!("listing history"); // very likely vulnerable to SQL injection @@ -208,144 +181,96 @@ impl Database for Sqlite { } ); - let history = self.query(query.as_str(), params![])?; + let res = sqlx::query_as::<_, History>(query.as_str()) + .fetch_all(&self.pool) + .await?; - Ok(history) + Ok(res) } - fn range( + async fn range( &self, from: chrono::DateTime, to: chrono::DateTime, ) -> Result> { debug!("listing history from {:?} to {:?}", from, to); - let mut stmt = self.conn.prepare( - "SELECT * FROM history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc", - )?; - - let history_iter = stmt.query_map( - params![from.timestamp_nanos(), to.timestamp_nanos()], - |row| history_from_sqlite_row(None, row), - )?; + let res = sqlx::query_as::<_, History>( + "select * from history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc", + ) + .bind(from) + .bind(to) + .fetch_all(&self.pool) + .await?; - Ok(history_iter.filter_map(Result::ok).collect()) + Ok(res) } - fn first(&self) -> Result { - let mut stmt = self - .conn - .prepare("SELECT * FROM history order by timestamp asc limit 1")?; - - let history = stmt.query_row(params![], |row| history_from_sqlite_row(None, row))?; + async fn first(&self) -> Result { + let res = sqlx::query_as::<_, History>( + "select * from history where duration >= 0 order by timestamp asc limit 1", + ) + .fetch_one(&self.pool) + .await?; - Ok(history) + Ok(res) } - fn last(&self) -> Result { - let mut stmt = self - .conn - .prepare("SELECT * FROM history where duration >= 0 order by timestamp desc limit 1")?; - - let history = stmt.query_row(params![], |row| history_from_sqlite_row(None, row))?; + async fn last(&self) -> Result { + let res = sqlx::query_as::<_, History>( + "select * from history where duration >= 0 order by timestamp desc limit 1", + ) + .fetch_one(&self.pool) + .await?; - Ok(history) + Ok(res) } - fn before(&self, timestamp: chrono::DateTime, count: i64) -> Result> { - let mut stmt = self - .conn - .prepare("SELECT * FROM history where timestamp < ? 
order by timestamp desc limit ?")?; - - let history_iter = stmt.query_map(params![timestamp.timestamp_nanos(), count], |row| { - history_from_sqlite_row(None, row) - })?; + async fn before(&self, timestamp: chrono::DateTime, count: i64) -> Result> { + let res = sqlx::query_as::<_, History>( + "select * from history where timestamp < ?1 order by timestamp desc limit ?2", + ) + .bind(timestamp) + .bind(count) + .fetch_all(&self.pool) + .await?; - Ok(history_iter.filter_map(Result::ok).collect()) + Ok(res) } - fn query(&self, query: &str, params: impl Params) -> Result> { - let mut stmt = self.conn.prepare(query)?; + async fn history_count(&self) -> Result { + let res: (i64,) = sqlx::query_as("select count(1) from history") + .fetch_one(&self.pool) + .await?; - let history_iter = stmt.query_map(params, |row| history_from_sqlite_row(None, row))?; - - Ok(history_iter.filter_map(Result::ok).collect()) + Ok(res.0) } - fn prefix_search(&self, query: &str) -> Result> { + async fn search(&self, limit: Option, query: &str) -> Result> { let query = query.to_string().replace("*", "%"); // allow wildcard char + let limit = limit.map_or("".to_owned(), |l| format!("limit {}", l)); - self.query( - "select * from history h - where command like ?1 || '%' - and timestamp = ( - select max(timestamp) from history - where h.command = history.command - ) - order by timestamp desc limit 200", - &[query.as_str()], + let res = sqlx::query_as::<_, History>( + format!( + "select * from history + where command like ?1 || '%' + order by timestamp desc {}", + limit.clone() + ) + .as_str(), ) - } - - fn history_count(&self) -> Result { - let res: i64 = - self.conn - .query_row_and_then("select count(1) from history;", params![], |row| row.get(0))?; + .bind(query) + .fetch_all(&self.pool) + .await?; Ok(res) } - fn search(&self, cwd: Option, exit: Option, query: &str) -> Result> { - match (cwd, exit) { - (Some(cwd), Some(exit)) => self.query( - "select * from history - where command like ?1 || '%' - and cwd = ?2 - and exit = ?3 - order by timestamp asc limit 1000", - &[query, cwd.as_str(), exit.to_string().as_str()], - ), - (Some(cwd), None) => self.query( - "select * from history - where command like ?1 || '%' - and cwd = ?2 - order by timestamp asc limit 1000", - &[query, cwd.as_str()], - ), - (None, Some(exit)) => self.query( - "select * from history - where command like ?1 || '%' - and exit = ?2 - order by timestamp asc limit 1000", - &[query, exit.to_string().as_str()], - ), - (None, None) => self.query( - "select * from history - where command like ?1 || '%' - order by timestamp asc limit 1000", - &[query], - ), - } - } -} + async fn query_history(&self, query: &str) -> Result> { + let res = sqlx::query_as::<_, History>(query) + .fetch_all(&self.pool) + .await?; -fn history_from_sqlite_row( - id: Option, - row: &rusqlite::Row, -) -> Result { - let id = match id { - Some(id) => id, - None => row.get(0)?, - }; - - Ok(History { - id, - timestamp: Utc.timestamp_nanos(row.get(1)?), - duration: row.get(2)?, - exit: row.get(3)?, - command: row.get(4)?, - cwd: row.get(5)?, - session: row.get(6)?, - hostname: row.get(7)?, - }) + Ok(res) + } } diff --git a/atuin-client/src/encryption.rs b/atuin-client/src/encryption.rs index 19b773ab..9cb8d3ea 100644 --- a/atuin-client/src/encryption.rs +++ b/atuin-client/src/encryption.rs @@ -98,7 +98,7 @@ pub fn decrypt(encrypted_history: &EncryptedHistory, key: &secretbox::Key) -> Re mod test { use sodiumoxide::crypto::secretbox; - use crate::local::history::History; + use 
crate::history::History; use super::{decrypt, encrypt}; diff --git a/atuin-client/src/history.rs b/atuin-client/src/history.rs index 8dd161db..92e92ddf 100644 --- a/atuin-client/src/history.rs +++ b/atuin-client/src/history.rs @@ -6,7 +6,7 @@ use chrono::Utc; use atuin_common::utils::uuid_v4; // Any new fields MUST be Optional<>! -#[derive(Debug, Clone, Serialize, Deserialize, Ord, PartialOrd)] +#[derive(Debug, Clone, Serialize, Deserialize, Ord, PartialOrd, sqlx::FromRow)] pub struct History { pub id: String, pub timestamp: chrono::DateTime, diff --git a/atuin-client/src/settings.rs b/atuin-client/src/settings.rs index 254bca6d..4ea4be84 100644 --- a/atuin-client/src/settings.rs +++ b/atuin-client/src/settings.rs @@ -5,7 +5,6 @@ use std::path::{Path, PathBuf}; use chrono::prelude::*; use chrono::Utc; use config::{Config, Environment, File as ConfigFile}; -use directories::ProjectDirs; use eyre::{eyre, Result}; use parse_duration::parse; @@ -28,9 +27,10 @@ pub struct Settings { impl Settings { pub fn save_sync_time() -> Result<()> { - let sync_time_path = ProjectDirs::from("com", "elliehuxtable", "atuin") - .ok_or_else(|| eyre!("could not determine key file location"))?; - let sync_time_path = sync_time_path.data_dir().join("last_sync_time"); + let data_dir = atuin_common::utils::data_dir(); + let data_dir = data_dir.as_path(); + + let sync_time_path = data_dir.join("last_sync_time"); std::fs::write(sync_time_path, Utc::now().to_rfc3339())?; @@ -38,15 +38,10 @@ impl Settings { } pub fn last_sync() -> Result> { - let sync_time_path = ProjectDirs::from("com", "elliehuxtable", "atuin"); - - if sync_time_path.is_none() { - debug!("failed to load projectdirs, not syncing"); - return Err(eyre!("could not load project dirs")); - } + let data_dir = atuin_common::utils::data_dir(); + let data_dir = data_dir.as_path(); - let sync_time_path = sync_time_path.unwrap(); - let sync_time_path = sync_time_path.data_dir().join("last_sync_time"); + let sync_time_path = data_dir.join("last_sync_time"); if !sync_time_path.exists() { return Ok(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0)); @@ -73,10 +68,14 @@ impl Settings { } pub fn new() -> Result { - let config_dir = ProjectDirs::from("com", "elliehuxtable", "atuin").unwrap(); - let config_dir = config_dir.config_dir(); + let config_dir = atuin_common::utils::config_dir(); + let config_dir = config_dir.as_path(); + + let data_dir = atuin_common::utils::data_dir(); + let data_dir = data_dir.as_path(); create_dir_all(config_dir)?; + create_dir_all(data_dir)?; let mut config_file = if let Ok(p) = std::env::var("ATUIN_CONFIG_DIR") { PathBuf::from(p) @@ -90,27 +89,16 @@ impl Settings { let mut s = Config::new(); - let db_path = ProjectDirs::from("com", "elliehuxtable", "atuin") - .ok_or_else(|| eyre!("could not determine db file location"))? - .data_dir() - .join("history.db"); - - let key_path = ProjectDirs::from("com", "elliehuxtable", "atuin") - .ok_or_else(|| eyre!("could not determine key file location"))? - .data_dir() - .join("key"); - - let session_path = ProjectDirs::from("com", "elliehuxtable", "atuin") - .ok_or_else(|| eyre!("could not determine session file location"))? 
- .data_dir() - .join("session"); + let db_path = data_dir.join("history.db"); + let key_path = data_dir.join("key"); + let session_path = data_dir.join("session"); s.set_default("db_path", db_path.to_str())?; s.set_default("key_path", key_path.to_str())?; s.set_default("session_path", session_path.to_str())?; s.set_default("dialect", "us")?; s.set_default("auto_sync", true)?; - s.set_default("sync_frequency", "5m")?; + s.set_default("sync_frequency", "1h")?; s.set_default("sync_address", "https://api.atuin.sh")?; if config_file.exists() { diff --git a/atuin-client/src/sync.rs b/atuin-client/src/sync.rs index 5d81a5e6..94408018 100644 --- a/atuin-client/src/sync.rs +++ b/atuin-client/src/sync.rs @@ -30,7 +30,7 @@ async fn sync_download( let remote_count = client.count().await?; - let initial_local = db.history_count()?; + let initial_local = db.history_count().await?; let mut local_count = initial_local; let mut last_sync = if force { @@ -48,9 +48,9 @@ async fn sync_download( .get_history(last_sync, last_timestamp, host.clone()) .await?; - db.save_bulk(&page)?; + db.save_bulk(&page).await?; - local_count = db.history_count()?; + local_count = db.history_count().await?; if page.len() < HISTORY_PAGE_SIZE.try_into().unwrap() { break; @@ -87,7 +87,7 @@ async fn sync_upload( let initial_remote_count = client.count().await?; let mut remote_count = initial_remote_count; - let local_count = db.history_count()?; + let local_count = db.history_count().await?; debug!("remote has {}, we have {}", remote_count, local_count); @@ -98,7 +98,7 @@ async fn sync_upload( let mut cursor = Utc::now(); while local_count > remote_count { - let last = db.before(cursor, HISTORY_PAGE_SIZE)?; + let last = db.before(cursor, HISTORY_PAGE_SIZE).await?; let mut buffer = Vec::::new(); if last.is_empty() { diff --git a/atuin-common/src/utils.rs b/atuin-common/src/utils.rs index ac5738b3..96a3a1dc 100644 --- a/atuin-common/src/utils.rs +++ b/atuin-common/src/utils.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use crypto::digest::Digest; use crypto::sha2::Sha256; use sodiumoxide::crypto::pwhash::argon2id13; @@ -27,3 +29,40 @@ pub fn hash_str(string: &str) -> String { pub fn uuid_v4() -> String { Uuid::new_v4().to_simple().to_string() } + +pub fn config_dir() -> PathBuf { + // TODO: more reliable, more tested + // I don't want to use ProjectDirs, it puts config in awkward places on + // mac. Data too. Seems to be more intended for GUI apps. + let home = std::env::var("HOME").expect("$HOME not found"); + let home = PathBuf::from(home); + + std::env::var("XDG_CONFIG_HOME").map_or_else( + |_| { + let mut config = home.clone(); + config.push(".config"); + config.push("atuin"); + config + }, + PathBuf::from, + ) +} + +pub fn data_dir() -> PathBuf { + // TODO: more reliable, more tested + // I don't want to use ProjectDirs, it puts config in awkward places on + // mac. Data too. Seems to be more intended for GUI apps. 
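    // Prefer $XDG_DATA_HOME when it is set; otherwise fall back to ~/.local/share/atuin built from $HOME below.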
+ let home = std::env::var("HOME").expect("$HOME not found"); + let home = PathBuf::from(home); + + std::env::var("XDG_DATA_HOME").map_or_else( + |_| { + let mut data = home.clone(); + data.push(".local"); + data.push("share"); + data.push("atuin"); + data + }, + PathBuf::from, + ) +} diff --git a/atuin-server/migrations/20210425153745_create_history.sql b/atuin-server/migrations/20210425153745_create_history.sql new file mode 100644 index 00000000..2c2d17b0 --- /dev/null +++ b/atuin-server/migrations/20210425153745_create_history.sql @@ -0,0 +1,11 @@ +create table history ( + id bigserial primary key, + client_id text not null unique, -- the client-generated ID + user_id bigserial not null, -- allow multiple users + hostname text not null, -- a unique identifier from the client (can be hashed, random, whatever) + timestamp timestamp not null, -- one of the few non-encrypted metadatas + + data varchar(8192) not null, -- store the actual history data, encrypted. I don't wanna know! + + created_at timestamp not null default current_timestamp +); diff --git a/atuin-server/migrations/20210425153757_create_users.sql b/atuin-server/migrations/20210425153757_create_users.sql new file mode 100644 index 00000000..a25dcced --- /dev/null +++ b/atuin-server/migrations/20210425153757_create_users.sql @@ -0,0 +1,10 @@ +create table users ( + id bigserial primary key, -- also store our own ID + username varchar(32) not null unique, -- being able to contact users is useful + email varchar(128) not null unique, -- being able to contact users is useful + password varchar(128) not null unique +); + +-- the prior index is case sensitive :( +CREATE UNIQUE INDEX email_unique_idx on users (LOWER(email)); +CREATE UNIQUE INDEX username_unique_idx on users (LOWER(username)); diff --git a/atuin-server/migrations/20210425153800_create_sessions.sql b/atuin-server/migrations/20210425153800_create_sessions.sql new file mode 100644 index 00000000..c2fb6559 --- /dev/null +++ b/atuin-server/migrations/20210425153800_create_sessions.sql @@ -0,0 +1,6 @@ +-- Add migration script here +create table sessions ( + id bigserial primary key, + user_id bigserial, + token varchar(128) unique not null +); diff --git a/atuin-server/src/database.rs b/atuin-server/src/database.rs index 5945baaf..4a3828d0 100644 --- a/atuin-server/src/database.rs +++ b/atuin-server/src/database.rs @@ -40,6 +40,8 @@ impl Postgres { .connect(uri) .await?; + sqlx::migrate!("./migrations").run(&pool).await?; + Ok(Self { pool }) } } diff --git a/atuin-server/src/router.rs b/atuin-server/src/router.rs index d106068d..ffab74e5 100644 --- a/atuin-server/src/router.rs +++ b/atuin-server/src/router.rs @@ -1,7 +1,7 @@ use std::convert::Infallible; use eyre::Result; -use warp::Filter; +use warp::{hyper::StatusCode, Filter}; use atuin_common::api::SyncHistoryRequest; @@ -56,7 +56,7 @@ fn with_user( pub async fn router( settings: &Settings, -) -> Result + Clone> { +) -> Result + Clone> { let postgres = Postgres::new(settings.db_uri.as_str()).await?; let index = warp::get().and(warp::path::end()).map(handlers::index); @@ -115,7 +115,8 @@ pub async fn router( .or(add_history) .or(user) .or(register) - .or(login), + .or(login) + .or(warp::any().map(|| warp::reply::with_status("☕", StatusCode::IM_A_TEAPOT))), ) .with(warp::filters::log::log("atuin::api")); diff --git a/atuin-server/src/settings.rs b/atuin-server/src/settings.rs index e51b6b2a..7364656e 100644 --- a/atuin-server/src/settings.rs +++ b/atuin-server/src/settings.rs @@ -3,7 +3,6 @@ use std::io::prelude::*; use 
std::path::PathBuf; use config::{Config, Environment, File as ConfigFile}; -use directories::ProjectDirs; use eyre::{eyre, Result}; pub const HISTORY_PAGE_SIZE: i64 = 100; @@ -18,8 +17,8 @@ pub struct Settings { impl Settings { pub fn new() -> Result { - let config_dir = ProjectDirs::from("com", "elliehuxtable", "atuin").unwrap(); - let config_dir = config_dir.config_dir(); + let config_dir = atuin_common::utils::config_dir(); + let config_dir = config_dir.as_path(); create_dir_all(config_dir)?; diff --git a/docs/config.md b/docs/config.md new file mode 100644 index 00000000..7c042251 --- /dev/null +++ b/docs/config.md @@ -0,0 +1,99 @@ +# Config + +Atuin maintains two configuration files, stored in `~/.config/atuin/`. We store +data in `~/.local/share/atuin` (unless overridden by XDG\_\*). + +You can also change the path to the configuration directory by setting +`ATUIN_CONFIG_DIR`. For example + +``` +export ATUIN_CONFIG_DIR = /home/ellie/.atuin +``` + +## Client config + +``` +~/.config/atuin/config.toml +``` + +The client runs on a user's machine, and unless you're running a server, this +is what you care about. + +See [config.toml](../atuin-client/config.toml) for an example + +### `dialect` + +This configures how the [stats](stats.md) command parses dates. It has two +possible values + +``` +dialect = "uk" +``` + +or + +``` +dialect = "us" +``` + +and defaults to "us". + +### `auto_sync` + +Configures whether or not to automatically sync, when logged in. Defaults to +true + +``` +auto_sync = true/false +``` + +### `sync_address` + +The address of the server to sync with! Defaults to `https://api.atuin.sh`. + +``` +sync_address = "https://api.atuin.sh" +``` + +### `sync_frequency` + +How often to automatically sync with the server. This can be given in a +"human readable" format. For example, `10s`, `20m`, `1h`, etc. Defaults to `1h`. + +If set to `0`, Atuin will sync after every command. Some servers may potentially +rate limit, which won't cause any issues. + +``` +sync_frequency = "1h" +``` + +### `db_path` + +The path to the Atuin SQlite database. Defaults to +`~/.local/share/atuin/history.db`. + +``` +db_path = "~/.history.db" +``` + +### `key_path` + +The path to the Atuin encryption key. Defaults to +`~/.local/share/atuin/key`. + +``` +key = "~/.atuin-key" +``` + +### `session_path` + +The path to the Atuin server session file. Defaults to +`~/.local/share/atuin/session`. This is essentially just an API token + +``` +key = "~/.atuin-session" +``` + +## Server config + +`// TODO` diff --git a/docs/import.md b/docs/import.md new file mode 100644 index 00000000..9fc0580e --- /dev/null +++ b/docs/import.md @@ -0,0 +1,27 @@ +# `atuin import` + +Atuin can import your history from your "old" history file + +`atuin import auto` will attempt to figure out your shell (via \$SHELL) and run +the correct importer + +Unfortunately these older files do not store as much information as Atuin does, +so not all features are available with imported data. + +# zsh + +``` +atuin import zsh +``` + +If you've set HISTFILE, this should be picked up! 
If not, try + +``` +HISTFILE=/path/to/history/file atuin import zsh +``` + +This supports both the simple and extended format + +# bash + +TODO diff --git a/docs/list.md b/docs/list.md new file mode 100644 index 00000000..1b04a5b8 --- /dev/null +++ b/docs/list.md @@ -0,0 +1,11 @@ +# Listing history + +``` +atuin history list +``` + +| Arg | Description | +| -------------- | ----------------------------------------------------------------------------- | +| `--cwd/-c` | The directory to list history for (default: all dirs) | +| `--session/-s` | Enable listing history for the current session only (default: false) | +| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) | diff --git a/docs/search.md b/docs/search.md new file mode 100644 index 00000000..b1034007 --- /dev/null +++ b/docs/search.md @@ -0,0 +1,39 @@ +# `atuin search` + +``` +atuin search +``` + +Atuin search also supports wildcards, with either the `*` or `%` character. By +default, a prefix search is performed (ie, all queries are automatically +appended with a wildcard. + +| Arg | Description | +| ------------------ | ----------------------------------------------------------------------------- | +| `--cwd/-c` | The directory to list history for (default: all dirs) | +| `--exclude-cwd` | Do not include commands that ran in this directory (default: none) | +| `--exit/-e` | Filter by exit code (default: none) | +| `--exclude-exit` | Do not include commands that exited with this value (default: none) | +| `--before` | Only include commands ran before this time(default: none) | +| `--after` | Only include commands ran after this time(default: none) | +| `--interactive/-i` | Open the interactive search UI (default: false) | +| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) | + +## Examples + +``` +# Open the interactive search TUI +atuin search -i + +# Open the interactive search TUI preloaded with a query +atuin search -i atuin + +# Search for all commands, beginning with cargo, that exited successfully +atuin search --exit 0 cargo + +# Search for all commands, that failed, from the current dir, and were ran before April 1st 2021 +atuin search --exclude-exit 0 --before 01/04/2021 --cwd . 
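
# Plain queries are prefix matches, so a leading wildcard matches anywhere in the
# command (illustrative; "docker" is just an example term)
atuin search "*docker"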
+ +# Search for all commands, beginning with cargo, that exited successfully, and were ran after yesterday at 3pm +atuin search --exit 0 --after "yesterday 3pm" cargo +``` diff --git a/docs/stats.md b/docs/stats.md new file mode 100644 index 00000000..9c08ce19 --- /dev/null +++ b/docs/stats.md @@ -0,0 +1,36 @@ +# `atuin stats` + +Atuin can also calculate stats based on your history - this is currently a +little basic, but more features to come + +``` +$ atuin stats day last friday + ++---------------------+------------+ +| Statistic | Value | ++---------------------+------------+ +| Most used command | git status | ++---------------------+------------+ +| Commands ran | 450 | ++---------------------+------------+ +| Unique commands ran | 213 | ++---------------------+------------+ + +$ atuin stats day 01/01/21 # also accepts absolute dates +``` + +It can also calculate statistics for all of known history: + +``` +$ atuin stats all + ++---------------------+-------+ +| Statistic | Value | ++---------------------+-------+ +| Most used command | ls | ++---------------------+-------+ +| Commands ran | 8190 | ++---------------------+-------+ +| Unique commands ran | 2996 | ++---------------------+-------+ +``` diff --git a/docs/sync.md b/docs/sync.md new file mode 100644 index 00000000..78510526 --- /dev/null +++ b/docs/sync.md @@ -0,0 +1,55 @@ +# `atuin sync` + +Atuin can backup your history to a server, and use this to ensure multiple +machines have the same shell history. This is all encrypted end-to-end, so the +server operator can _never_ see your data! + +Anyone can host a server (try `atuin server start`, more docs to follow), but I +host one at https://api.atuin.sh. This is the default server address, which can +be changed in the [config](docs/config.md). Again, I _cannot_ see your data, and +do not want to. + +## Sync frequency + +Syncing will happen automatically, unless configured otherwise. The sync +frequency is configurable in [config](docs/config.md) + +## Sync + +You can manually trigger a sync with `atuin sync` + +## Register + +Register for a sync account with + +``` +atuin register -u -e -p +``` + +Usernames must be unique, and emails shall only be used for important +notifications (security breaches, changes to service, etc). + +Upon success, you are also logged in :) Syncing should happen automatically from +here! + +## Key + +As all your data is encrypted, Atuin generates a key for you. It's stored in the +Atuin data directory (`~/.local/share/atuin` on Linux). + +You can also get this with + +``` +atuin key +``` + +Never share this with anyone! + +## Login + +If you want to login to a new machine, you will require your encryption key +(`atuin key`). + +``` +atuin login -u -p -k +``` diff --git a/install.sh b/install.sh new file mode 100644 index 00000000..162602c0 --- /dev/null +++ b/install.sh @@ -0,0 +1 @@ +#!/ diff --git a/migrations/.gitkeep b/migrations/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/migrations/00000000000000_diesel_initial_setup/down.sql b/migrations/00000000000000_diesel_initial_setup/down.sql deleted file mode 100644 index a9f52609..00000000 --- a/migrations/00000000000000_diesel_initial_setup/down.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. 
- -DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); -DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/migrations/00000000000000_diesel_initial_setup/up.sql b/migrations/00000000000000_diesel_initial_setup/up.sql deleted file mode 100644 index d68895b1..00000000 --- a/migrations/00000000000000_diesel_initial_setup/up.sql +++ /dev/null @@ -1,36 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. - - - - --- Sets up a trigger for the given table to automatically set a column called --- `updated_at` whenever the row is modified (unless `updated_at` was included --- in the modified columns) --- --- # Example --- --- ```sql --- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); --- --- SELECT diesel_manage_updated_at('users'); --- ``` -CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ -BEGIN - EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); -END; -$$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ -BEGIN - IF ( - NEW IS DISTINCT FROM OLD AND - NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at - ) THEN - NEW.updated_at := current_timestamp; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/migrations/2021-03-20-151809_create_history/down.sql b/migrations/2021-03-20-151809_create_history/down.sql deleted file mode 100644 index ea02ce42..00000000 --- a/migrations/2021-03-20-151809_create_history/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -drop table history; diff --git a/migrations/2021-03-20-151809_create_history/up.sql b/migrations/2021-03-20-151809_create_history/up.sql deleted file mode 100644 index 4192b04d..00000000 --- a/migrations/2021-03-20-151809_create_history/up.sql +++ /dev/null @@ -1,13 +0,0 @@ --- Your SQL goes here --- lower case SQL please, this isn't a shouting match -create table history ( - id bigserial primary key, - client_id text not null unique, -- the client-generated ID - user_id bigserial not null, -- allow multiple users - hostname text not null, -- a unique identifier from the client (can be hashed, random, whatever) - timestamp timestamp not null, -- one of the few non-encrypted metadatas - - data varchar(8192) not null, -- store the actual history data, encrypted. I don't wanna know! 
- - created_at timestamp not null default current_timestamp -); diff --git a/migrations/2021-03-20-171007_create_users/down.sql b/migrations/2021-03-20-171007_create_users/down.sql deleted file mode 100644 index 5795f6b3..00000000 --- a/migrations/2021-03-20-171007_create_users/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -drop table users; diff --git a/migrations/2021-03-20-171007_create_users/up.sql b/migrations/2021-03-20-171007_create_users/up.sql deleted file mode 100644 index 46c6a372..00000000 --- a/migrations/2021-03-20-171007_create_users/up.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Your SQL goes here -create table users ( - id bigserial primary key, -- also store our own ID - username varchar(32) not null unique, -- being able to contact users is useful - email varchar(128) not null unique, -- being able to contact users is useful - password varchar(128) not null unique -); - --- the prior index is case sensitive :( -CREATE UNIQUE INDEX email_unique_idx on users (LOWER(email)); -CREATE UNIQUE INDEX username_unique_idx on users (LOWER(username)); diff --git a/migrations/2021-03-21-181750_create_sessions/down.sql b/migrations/2021-03-21-181750_create_sessions/down.sql deleted file mode 100644 index 53a779c9..00000000 --- a/migrations/2021-03-21-181750_create_sessions/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- This file should undo anything in `up.sql` -drop table sessions; diff --git a/migrations/2021-03-21-181750_create_sessions/up.sql b/migrations/2021-03-21-181750_create_sessions/up.sql deleted file mode 100644 index b81705e2..00000000 --- a/migrations/2021-03-21-181750_create_sessions/up.sql +++ /dev/null @@ -1,6 +0,0 @@ --- Your SQL goes here -create table sessions ( - id bigserial primary key, - user_id bigserial, - token varchar(128) unique not null -); diff --git a/src/command/event.rs b/src/command/event.rs index b205be70..f09752d6 100644 --- a/src/command/event.rs +++ b/src/command/event.rs @@ -1,7 +1,7 @@ -use std::sync::mpsc; use std::thread; use std::time::Duration; +use crossbeam_channel::unbounded; use termion::event::Key; use termion::input::TermRead; @@ -13,7 +13,7 @@ pub enum Event { /// A small event handler that wrap termion input and tick events. 
Each event /// type is handled in its own thread and returned to a common `Receiver` pub struct Events { - rx: mpsc::Receiver>, + rx: crossbeam_channel::Receiver>, } #[derive(Debug, Clone, Copy)] @@ -37,7 +37,7 @@ impl Events { } pub fn with_config(config: Config) -> Events { - let (tx, rx) = mpsc::channel(); + let (tx, rx) = unbounded(); { let tx = tx.clone(); @@ -62,7 +62,7 @@ impl Events { Events { rx } } - pub fn next(&self) -> Result, mpsc::RecvError> { + pub fn next(&self) -> Result, crossbeam_channel::RecvError> { self.rx.recv() } } diff --git a/src/command/history.rs b/src/command/history.rs index a88aeae2..7542496c 100644 --- a/src/command/history.rs +++ b/src/command/history.rs @@ -1,7 +1,10 @@ use std::env; +use std::io::Write; +use std::time::Duration; use eyre::Result; use structopt::StructOpt; +use tabwriter::TabWriter; use atuin_client::database::Database; use atuin_client::history::History; @@ -36,29 +39,65 @@ pub enum Cmd { #[structopt(long, short)] session: bool, - }, - #[structopt( - about="search for a command", - aliases=&["se", "sea", "sear", "searc"], - )] - Search { query: Vec }, + #[structopt(long, short)] + human: bool, + }, #[structopt( about="get the last command ran", aliases=&["la", "las"], )] - Last {}, + Last { + #[structopt(long, short)] + human: bool, + }, } -fn print_list(h: &[History]) { - for i in h { - println!("{}", i.command); +#[allow(clippy::clippy::cast_sign_loss)] +pub fn print_list(h: &[History], human: bool) { + let mut writer = TabWriter::new(std::io::stdout()).padding(2); + + let lines = h.iter().map(|h| { + if human { + let duration = humantime::format_duration(Duration::from_nanos(std::cmp::max( + h.duration, 0, + ) as u64)) + .to_string(); + let duration: Vec<&str> = duration.split(' ').collect(); + let duration = duration[0]; + + format!( + "{}\t{}\t{}\n", + h.timestamp.format("%Y-%m-%d %H:%M:%S"), + h.command.trim(), + duration, + ) + } else { + format!( + "{}\t{}\t{}\n", + h.timestamp.timestamp_nanos(), + h.command.trim(), + h.duration + ) + } + }); + + for i in lines.rev() { + writer + .write_all(i.as_bytes()) + .expect("failed to write to tab writer"); } + + writer.flush().expect("failed to flush tab writer"); } impl Cmd { - pub async fn run(&self, settings: &Settings, db: &mut (impl Database + Send)) -> Result<()> { + pub async fn run( + &self, + settings: &Settings, + db: &mut (impl Database + Send + Sync), + ) -> Result<()> { match self { Self::Start { command: words } => { let command = words.join(" "); @@ -69,7 +108,7 @@ impl Cmd { // print the ID // we use this as the key for calling end println!("{}", h.id); - db.save(&h)?; + db.save(&h).await?; Ok(()) } @@ -78,7 +117,7 @@ impl Cmd { return Ok(()); } - let mut h = db.load(id)?; + let mut h = db.load(id).await?; if h.duration > 0 { debug!("cannot end history - already has duration"); @@ -90,7 +129,7 @@ impl Cmd { h.exit = *exit; h.duration = chrono::Utc::now().timestamp_nanos() - h.timestamp.timestamp_nanos(); - db.update(&h)?; + db.update(&h).await?; if settings.should_sync()? { debug!("running periodic background sync"); @@ -102,41 +141,38 @@ impl Cmd { Ok(()) } - Self::List { session, cwd, .. 
} => { - const QUERY_SESSION: &str = "select * from history where session = ?;"; - const QUERY_DIR: &str = "select * from history where cwd = ?;"; - const QUERY_SESSION_DIR: &str = - "select * from history where cwd = ?1 and session = ?2;"; - + Self::List { + session, + cwd, + human, + } => { let params = (session, cwd); - let cwd = env::current_dir()?.display().to_string(); let session = env::var("ATUIN_SESSION")?; - let history = match params { - (false, false) => db.list(None, false)?, - (true, false) => db.query(QUERY_SESSION, &[session.as_str()])?, - (false, true) => db.query(QUERY_DIR, &[cwd.as_str()])?, - (true, true) => { - db.query(QUERY_SESSION_DIR, &[cwd.as_str(), session.as_str()])? - } - }; + let query_session = format!("select * from history where session = {};", session); - print_list(&history); + let query_dir = format!("select * from history where cwd = {};", cwd); + let query_session_dir = format!( + "select * from history where cwd = {} and session = {};", + cwd, session + ); - Ok(()) - } + let history = match params { + (false, false) => db.list(None, false).await?, + (true, false) => db.query_history(query_session.as_str()).await?, + (false, true) => db.query_history(query_dir.as_str()).await?, + (true, true) => db.query_history(query_session_dir.as_str()).await?, + }; - Self::Search { query } => { - let history = db.prefix_search(&query.join(""))?; - print_list(&history); + print_list(&history, *human); Ok(()) } - Self::Last {} => { - let last = db.last()?; - print_list(&[last]); + Self::Last { human } => { + let last = db.last().await?; + print_list(&[last], *human); Ok(()) } diff --git a/src/command/import.rs b/src/command/import.rs index 56fb30a7..931e7af4 100644 --- a/src/command/import.rs +++ b/src/command/import.rs @@ -26,7 +26,7 @@ pub enum Cmd { } impl Cmd { - pub fn run(&self, db: &mut impl Database) -> Result<()> { + pub async fn run(&self, db: &mut (impl Database + Send + Sync)) -> Result<()> { println!(" A'Tuin "); println!("======================"); println!(" \u{1f30d} "); @@ -41,19 +41,19 @@ impl Cmd { if shell.ends_with("/zsh") { println!("Detected ZSH"); - import_zsh(db) + import_zsh(db).await } else { println!("cannot import {} history", shell); Ok(()) } } - Self::Zsh => import_zsh(db), + Self::Zsh => import_zsh(db).await, } } } -fn import_zsh(db: &mut impl Database) -> Result<()> { +async fn import_zsh(db: &mut (impl Database + Send + Sync)) -> Result<()> { // oh-my-zsh sets HISTFILE=~/.zhistory // zsh has no default value for this var, but uses ~/.zhistory. 
// we could maybe be smarter about this in the future :) @@ -103,7 +103,7 @@ fn import_zsh(db: &mut impl Database) -> Result<()> { buf.push(i); if buf.len() == buf_size { - db.save_bulk(&buf)?; + db.save_bulk(&buf).await?; progress.inc(buf.len() as u64); buf.clear(); @@ -111,7 +111,7 @@ fn import_zsh(db: &mut impl Database) -> Result<()> { } if !buf.is_empty() { - db.save_bulk(&buf)?; + db.save_bulk(&buf).await?; progress.inc(buf.len() as u64); } diff --git a/src/command/mod.rs b/src/command/mod.rs index 805ad9f0..78e6402e 100644 --- a/src/command/mod.rs +++ b/src/command/mod.rs @@ -47,12 +47,27 @@ pub enum AtuinCmd { #[structopt(long, short, about = "filter search result by directory")] cwd: Option, + #[structopt(long = "exclude-cwd", about = "exclude directory from results")] + exclude_cwd: Option, + #[structopt(long, short, about = "filter search result by exit code")] exit: Option, + #[structopt(long = "exclude-exit", about = "exclude results with this exit code")] + exclude_exit: Option, + + #[structopt(long, short, about = "only include results added before this date")] + before: Option, + + #[structopt(long, about = "only include results after this date")] + after: Option, + #[structopt(long, short, about = "open interactive search UI")] interactive: bool, + #[structopt(long, short, about = "use human-readable formatting for time")] + human: bool, + query: Vec, }, @@ -79,20 +94,39 @@ impl AtuinCmd { let db_path = PathBuf::from(client_settings.db_path.as_str()); - let mut db = Sqlite::new(db_path)?; + let mut db = Sqlite::new(db_path).await?; match self { Self::History(history) => history.run(&client_settings, &mut db).await, - Self::Import(import) => import.run(&mut db), + Self::Import(import) => import.run(&mut db).await, Self::Server(server) => server.run(&server_settings).await, - Self::Stats(stats) => stats.run(&mut db, &client_settings), + Self::Stats(stats) => stats.run(&mut db, &client_settings).await, Self::Init => init::init(), Self