summaryrefslogtreecommitdiffstats
path: root/atuin-server
diff options
context:
space:
mode:
authorEllie Huxtable <ellie@elliehuxtable.com>2024-01-05 17:57:49 +0000
committerGitHub <noreply@github.com>2024-01-05 17:57:49 +0000
commit7bc6ccdd70422f8fc763e2fd814a481bc79ce7b5 (patch)
treea1c064a7c7394d261711c6e046d4c60791e6cf6f /atuin-server
parent604ae40b9dee24e3ebe66cbc1ef1020683a34699 (diff)
feat: rework record sync for improved reliability (#1478)
* feat: rework record sync for improved reliability So, to tell a story 1. We introduced the record sync, intended to be the new algorithm to sync history. 2. On top of this, I added the KV store. This was intended as a simple test of the record sync, and to see if people wanted that sort of functionality 3. History remained syncing via the old means, as while it had issues it worked more-or-less OK. And we are aware of its flaws 4. If KV syncing worked ok, history would be moved across KV syncing ran ok for 6mo or so, so I started to move across history. For several weeks, I ran a local fork of Atuin + the server that synced via records instead. The record store maintained ordering via a linked list, which was a mistake. It performed well in testing, but was really difficult to debug and reason about. So when a few small sync issues occurred, they took an extremely long time to debug. This PR is huge, which I regret. It involves replacing the "parent" relationship that records once had (pointing to the previous record) with a simple index (generally referred to as idx). This also means we had to change the recordindex, which referenced "tails". Tails were the last item in the chain. Now that we use an "array" vs linked list, that logic was also replaced. And is much simpler :D Same for the queries that act on this data. ---- This isn't final - we still need to add 1. Proper server/client error handling, which has been lacking for a while 2. The actual history implementation on top This exists in a branch, just without deletions. Won't be much to add that, I just don't want to make this any larger than it already is The _only_ caveat here is that we basically lose data synced via the old record store. This is the KV data from before. It hasn't been deleted or anything, just no longer hooked up. So it's totally possible to write a migration script. I just need to do that. 
* update .gitignore * use correct endpoint * fix for stores with length of 1 * use create/delete enum for history store * lint, remove unneeded host_id * remove prints * add command to import old history * add enable/disable switch for record sync * add record sync to auto sync * satisfy the almighty clippy * remove file that I did not mean to commit * feedback
Diffstat (limited to 'atuin-server')
-rw-r--r--atuin-server/src/handlers/mod.rs1
-rw-r--r--atuin-server/src/handlers/record.rs109
-rw-r--r--atuin-server/src/handlers/v0/mod.rs1
-rw-r--r--atuin-server/src/handlers/v0/record.rs111
-rw-r--r--atuin-server/src/router.rs11
5 files changed, 143 insertions, 90 deletions
diff --git a/atuin-server/src/handlers/mod.rs b/atuin-server/src/handlers/mod.rs
index 18b1af8ed..b66a20bf5 100644
--- a/atuin-server/src/handlers/mod.rs
+++ b/atuin-server/src/handlers/mod.rs
@@ -8,6 +8,7 @@ pub mod history;
pub mod record;
pub mod status;
pub mod user;
+pub mod v0;
const VERSION: &str = env!("CARGO_PKG_VERSION");
diff --git a/atuin-server/src/handlers/record.rs b/atuin-server/src/handlers/record.rs
index 91b937b39..b5c07c5b8 100644
--- a/atuin-server/src/handlers/record.rs
+++ b/atuin-server/src/handlers/record.rs
@@ -1,109 +1,46 @@
-use axum::{extract::Query, extract::State, Json};
+use axum::{response::IntoResponse, Json};
use http::StatusCode;
-use metrics::counter;
-use serde::Deserialize;
-use tracing::{error, instrument};
+use serde_json::json;
+use tracing::instrument;
use super::{ErrorResponse, ErrorResponseStatus, RespExt};
-use crate::router::{AppState, UserAuth};
+use crate::router::UserAuth;
use atuin_server_database::Database;
-use atuin_common::record::{EncryptedData, HostId, Record, RecordId, RecordIndex};
+use atuin_common::record::{EncryptedData, Record};
#[instrument(skip_all, fields(user.id = user.id))]
pub async fn post<DB: Database>(
UserAuth(user): UserAuth,
- state: State<AppState<DB>>,
- Json(records): Json<Vec<Record<EncryptedData>>>,
) -> Result<(), ErrorResponseStatus<'static>> {
- let State(AppState { database, settings }) = state;
-
- tracing::debug!(
- count = records.len(),
- user = user.username,
- "request to add records"
+ // anyone who has actually used the old record store (a very small number) will see this error
+ // upon trying to sync.
+ // 1. The status endpoint will say that the server has nothing
+ // 2. The client will try to upload local records
+ // 3. Sync will fail with this error
+
+ // If the client has no local records, they will see the empty index and do nothing. For the
+ // vast majority of users, this is the case.
+ return Err(
+ ErrorResponse::reply("record store deprecated; please upgrade")
+ .with_status(StatusCode::BAD_REQUEST),
);
-
- counter!("atuin_record_uploaded", records.len() as u64);
-
- let too_big = records
- .iter()
- .any(|r| r.data.data.len() >= settings.max_record_size || settings.max_record_size == 0);
-
- if too_big {
- counter!("atuin_record_too_large", 1);
-
- return Err(
- ErrorResponse::reply("could not add records; record too large")
- .with_status(StatusCode::BAD_REQUEST),
- );
- }
-
- if let Err(e) = database.add_records(&user, &records).await {
- error!("failed to add record: {}", e);
-
- return Err(ErrorResponse::reply("failed to add record")
- .with_status(StatusCode::INTERNAL_SERVER_ERROR));
- };
-
- Ok(())
}
#[instrument(skip_all, fields(user.id = user.id))]
-pub async fn index<DB: Database>(
- UserAuth(user): UserAuth,
- state: State<AppState<DB>>,
-) -> Result<Json<RecordIndex>, ErrorResponseStatus<'static>> {
- let State(AppState {
- database,
- settings: _,
- }) = state;
-
- let record_index = match database.tail_records(&user).await {
- Ok(index) => index,
- Err(e) => {
- error!("failed to get record index: {}", e);
+pub async fn index<DB: Database>(UserAuth(user): UserAuth) -> axum::response::Response {
+ let ret = json!({
+ "hosts": {}
+ });
- return Err(ErrorResponse::reply("failed to calculate record index")
- .with_status(StatusCode::INTERNAL_SERVER_ERROR));
- }
- };
-
- Ok(Json(record_index))
-}
-
-#[derive(Deserialize)]
-pub struct NextParams {
- host: HostId,
- tag: String,
- start: Option<RecordId>,
- count: u64,
+ ret.to_string().into_response()
}
#[instrument(skip_all, fields(user.id = user.id))]
-pub async fn next<DB: Database>(
- params: Query<NextParams>,
+pub async fn next(
UserAuth(user): UserAuth,
- state: State<AppState<DB>>,
) -> Result<Json<Vec<Record<EncryptedData>>>, ErrorResponseStatus<'static>> {
- let State(AppState {
- database,
- settings: _,
- }) = state;
- let params = params.0;
-
- let records = match database
- .next_records(&user, params.host, params.tag, params.start, params.count)
- .await
- {
- Ok(records) => records,
- Err(e) => {
- error!("failed to get record index: {}", e);
-
- return Err(ErrorResponse::reply("failed to calculate record index")
- .with_status(StatusCode::INTERNAL_SERVER_ERROR));
- }
- };
+ let records = Vec::new();
Ok(Json(records))
}
diff --git a/atuin-server/src/handlers/v0/mod.rs b/atuin-server/src/handlers/v0/mod.rs
new file mode 100644
index 000000000..78fb47b8a
--- /dev/null
+++ b/atuin-server/src/handlers/v0/mod.rs
@@ -0,0 +1 @@
+pub(crate) mod record;
diff --git a/atuin-server/src/handlers/v0/record.rs b/atuin-server/src/handlers/v0/record.rs
new file mode 100644
index 000000000..79b2f80c9
--- /dev/null
+++ b/atuin-server/src/handlers/v0/record.rs
@@ -0,0 +1,111 @@
+use axum::{extract::Query, extract::State, Json};
+use http::StatusCode;
+use metrics::counter;
+use serde::Deserialize;
+use tracing::{error, instrument};
+
+use crate::{
+ handlers::{ErrorResponse, ErrorResponseStatus, RespExt},
+ router::{AppState, UserAuth},
+};
+use atuin_server_database::Database;
+
+use atuin_common::record::{EncryptedData, HostId, Record, RecordIdx, RecordStatus};
+
+#[instrument(skip_all, fields(user.id = user.id))]
+pub async fn post<DB: Database>(
+ UserAuth(user): UserAuth,
+ state: State<AppState<DB>>,
+ Json(records): Json<Vec<Record<EncryptedData>>>,
+) -> Result<(), ErrorResponseStatus<'static>> {
+ let State(AppState { database, settings }) = state;
+
+ tracing::debug!(
+ count = records.len(),
+ user = user.username,
+ "request to add records"
+ );
+
+ counter!("atuin_record_uploaded", records.len() as u64);
+
+ let too_big = records
+ .iter()
+ .any(|r| r.data.data.len() >= settings.max_record_size || settings.max_record_size == 0);
+
+ if too_big {
+ counter!("atuin_record_too_large", 1);
+
+ return Err(
+ ErrorResponse::reply("could not add records; record too large")
+ .with_status(StatusCode::BAD_REQUEST),
+ );
+ }
+
+ if let Err(e) = database.add_records(&user, &records).await {
+ error!("failed to add record: {}", e);
+
+ return Err(ErrorResponse::reply("failed to add record")
+ .with_status(StatusCode::INTERNAL_SERVER_ERROR));
+ };
+
+ Ok(())
+}
+
+#[instrument(skip_all, fields(user.id = user.id))]
+pub async fn index<DB: Database>(
+ UserAuth(user): UserAuth,
+ state: State<AppState<DB>>,
+) -> Result<Json<RecordStatus>, ErrorResponseStatus<'static>> {
+ let State(AppState {
+ database,
+ settings: _,
+ }) = state;
+
+ let record_index = match database.status(&user).await {
+ Ok(index) => index,
+ Err(e) => {
+ error!("failed to get record index: {}", e);
+
+ return Err(ErrorResponse::reply("failed to calculate record index")
+ .with_status(StatusCode::INTERNAL_SERVER_ERROR));
+ }
+ };
+
+ Ok(Json(record_index))
+}
+
+#[derive(Deserialize)]
+pub struct NextParams {
+ host: HostId,
+ tag: String,
+ start: Option<RecordIdx>,
+ count: u64,
+}
+
+#[instrument(skip_all, fields(user.id = user.id))]
+pub async fn next<DB: Database>(
+ params: Query<NextParams>,
+ UserAuth(user): UserAuth,
+ state: State<AppState<DB>>,
+) -> Result<Json<Vec<Record<EncryptedData>>>, ErrorResponseStatus<'static>> {
+ let State(AppState {
+ database,
+ settings: _,
+ }) = state;
+ let params = params.0;
+
+ let records = match database
+ .next_records(&user, params.host, params.tag, params.start, params.count)
+ .await
+ {
+ Ok(records) => records,
+ Err(e) => {
+ error!("failed to get record index: {}", e);
+
+ return Err(ErrorResponse::reply("failed to calculate record index")
+ .with_status(StatusCode::INTERNAL_SERVER_ERROR));
+ }
+ };
+
+ Ok(Json(records))
+}
diff --git a/atuin-server/src/router.rs b/atuin-server/src/router.rs
index 42cfaa865..500e1a29a 100644
--- a/atuin-server/src/router.rs
+++ b/atuin-server/src/router.rs
@@ -118,13 +118,16 @@ pub fn router<DB: Database>(database: DB, settings: Settings<DB::Settings>) -> R
.route("/sync/status", get(handlers::status::status))
.route("/history", post(handlers::history::add))
.route("/history", delete(handlers::history::delete))
- .route("/record", post(handlers::record::post))
- .route("/record", get(handlers::record::index))
- .route("/record/next", get(handlers::record::next))
.route("/user/:username", get(handlers::user::get))
.route("/account", delete(handlers::user::delete))
.route("/register", post(handlers::user::register))
- .route("/login", post(handlers::user::login));
+ .route("/login", post(handlers::user::login))
+ .route("/record", post(handlers::record::post::<DB>))
+ .route("/record", get(handlers::record::index::<DB>))
+ .route("/record/next", get(handlers::record::next))
+ .route("/api/v0/record", post(handlers::v0::record::post))
+ .route("/api/v0/record", get(handlers::v0::record::index))
+ .route("/api/v0/record/next", get(handlers::v0::record::next));
let path = settings.path.as_str();
if path.is_empty() {