author    Conrad Ludgate <conradludgate@gmail.com>    2023-06-12 09:04:35 +0100
committer GitHub <noreply@github.com>                 2023-06-12 09:04:35 +0100
commit    8655c93853506acf05f6ae4e58bfc2c6198be254 (patch)
tree      22d20b35636ad2eb717d58c93ae07378adbb76eb /atuin-server
parent    dccdb2c33f40b05377297eff9fa2090442e77286 (diff)
refactor server to allow pluggable db and tracing (#1036)

* refactor server to allow pluggable db and tracing
* clean up
* fix descriptions
* remove dependencies
Diffstat (limited to 'atuin-server')
-rw-r--r--  atuin-server/Cargo.toml                                              |    4
-rw-r--r--  atuin-server/migrations/20210425153745_create_history.sql            |   11
-rw-r--r--  atuin-server/migrations/20210425153757_create_users.sql              |   10
-rw-r--r--  atuin-server/migrations/20210425153800_create_sessions.sql           |    6
-rw-r--r--  atuin-server/migrations/20220419082412_add_count_trigger.sql         |   51
-rw-r--r--  atuin-server/migrations/20220421073605_fix_count_trigger_delete.sql  |   35
-rw-r--r--  atuin-server/migrations/20220421174016_larger-commands.sql           |    3
-rw-r--r--  atuin-server/migrations/20220426172813_user-created-at.sql           |    1
-rw-r--r--  atuin-server/migrations/20220505082442_create-events.sql             |   14
-rw-r--r--  atuin-server/migrations/20220610074049_history-length.sql            |    2
-rw-r--r--  atuin-server/migrations/20230315220537_drop-events.sql               |    2
-rw-r--r--  atuin-server/migrations/20230315224203_create-deleted.sql            |    5
-rw-r--r--  atuin-server/migrations/20230515221038_trigger-delete-only.sql       |   30
-rw-r--r--  atuin-server/src/auth.rs                                             |  222
-rw-r--r--  atuin-server/src/calendar.rs                                         |   17
-rw-r--r--  atuin-server/src/database.rs                                         |  510
-rw-r--r--  atuin-server/src/handlers/history.rs                                 |   44
-rw-r--r--  atuin-server/src/handlers/status.rs                                  |    5
-rw-r--r--  atuin-server/src/handlers/user.rs                                    |   22
-rw-r--r--  atuin-server/src/lib.rs                                              |   43
-rw-r--r--  atuin-server/src/models.rs                                           |   26
-rw-r--r--  atuin-server/src/router.rs                                           |   20
-rw-r--r--  atuin-server/src/settings.rs                                         |   12
23 files changed, 81 insertions(+), 1037 deletions(-)
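
The heart of this change: database access moves behind a trait in the new atuin-server-database crate, so handlers no longer touch sqlx or Postgres directly. A minimal sketch of that boundary, assuming a trait of async methods with handlers generic over the backend (the names below are illustrative, not the actual atuin-server-database API):

    use async_trait::async_trait;

    pub struct User {
        pub id: i64,
        pub username: String,
    }

    // The pluggable boundary: any storage backend implements this trait.
    #[async_trait]
    pub trait Database: Send + Sync {
        async fn get_user(&self, username: &str) -> eyre::Result<User>;
        async fn count_history(&self, user: &User) -> eyre::Result<i64>;
    }

    // Handlers stay generic over the backend, so Postgres (or anything else)
    // is only chosen at the binary crate's edge.
    pub async fn status<DB: Database>(db: &DB, username: &str) -> eyre::Result<i64> {
        let user = db.get_user(username).await?;
        db.count_history(&user).await
    }

That inversion is what lets all of the sqlx code and Postgres migrations below leave this crate.
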
diff --git a/atuin-server/Cargo.toml b/atuin-server/Cargo.toml
index e4cbf3e0e..f308fa30f 100644
--- a/atuin-server/Cargo.toml
+++ b/atuin-server/Cargo.toml
@@ -11,20 +11,18 @@ repository = { workspace = true }
[dependencies]
atuin-common = { path = "../atuin-common", version = "15.0.0" }
+atuin-server-database = { path = "../atuin-server-database", version = "15.0.0" }
tracing = "0.1"
chrono = { workspace = true }
eyre = { workspace = true }
uuid = { workspace = true }
-whoami = { workspace = true }
config = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
-sodiumoxide = { workspace = true }
base64 = { workspace = true }
rand = { workspace = true }
tokio = { workspace = true }
-sqlx = { workspace = true }
async-trait = { workspace = true }
axum = "0.6.4"
http = "0.2"
diff --git a/atuin-server/migrations/20210425153745_create_history.sql b/atuin-server/migrations/20210425153745_create_history.sql
deleted file mode 100644
index 2c2d17b0a..000000000
--- a/atuin-server/migrations/20210425153745_create_history.sql
+++ /dev/null
@@ -1,11 +0,0 @@
-create table history (
- id bigserial primary key,
- client_id text not null unique, -- the client-generated ID
- user_id bigserial not null, -- allow multiple users
- hostname text not null, -- a unique identifier from the client (can be hashed, random, whatever)
- timestamp timestamp not null, -- one of the few non-encrypted metadatas
-
- data varchar(8192) not null, -- store the actual history data, encrypted. I don't wanna know!
-
- created_at timestamp not null default current_timestamp
-);
diff --git a/atuin-server/migrations/20210425153757_create_users.sql b/atuin-server/migrations/20210425153757_create_users.sql
deleted file mode 100644
index a25dcced6..000000000
--- a/atuin-server/migrations/20210425153757_create_users.sql
+++ /dev/null
@@ -1,10 +0,0 @@
-create table users (
- id bigserial primary key, -- also store our own ID
- username varchar(32) not null unique, -- being able to contact users is useful
- email varchar(128) not null unique, -- being able to contact users is useful
- password varchar(128) not null unique
-);
-
--- the prior index is case sensitive :(
-CREATE UNIQUE INDEX email_unique_idx on users (LOWER(email));
-CREATE UNIQUE INDEX username_unique_idx on users (LOWER(username));
diff --git a/atuin-server/migrations/20210425153800_create_sessions.sql b/atuin-server/migrations/20210425153800_create_sessions.sql
deleted file mode 100644
index c2fb65598..000000000
--- a/atuin-server/migrations/20210425153800_create_sessions.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Add migration script here
-create table sessions (
- id bigserial primary key,
- user_id bigserial,
- token varchar(128) unique not null
-);
diff --git a/atuin-server/migrations/20220419082412_add_count_trigger.sql b/atuin-server/migrations/20220419082412_add_count_trigger.sql
deleted file mode 100644
index dd1afa88b..000000000
--- a/atuin-server/migrations/20220419082412_add_count_trigger.sql
+++ /dev/null
@@ -1,51 +0,0 @@
--- Prior to this, the count endpoint was super naive and just ran COUNT(1).
--- This is slow asf. Now that we have an amount of actual traffic,
--- stop doing that!
--- This basically maintains a count, so we can read ONE row, instead of ALL the
--- rows. Much better.
--- Future optimisation could use some sort of cache so we don't even need to hit
--- postgres at all.
-
-create table total_history_count_user(
- id bigserial primary key,
- user_id bigserial,
- total integer -- try and avoid using keywords - hence total, not count
-);
-
-create or replace function user_history_count()
-returns trigger as
-$func$
-begin
- if (TG_OP='INSERT') then
- update total_history_count_user set total = total + 1 where user_id = new.user_id;
-
- if not found then
- insert into total_history_count_user(user_id, total)
- values (
- new.user_id,
- (select count(1) from history where user_id = new.user_id)
- );
- end if;
-
- elsif (TG_OP='DELETE') then
- update total_history_count_user set total = total - 1 where user_id = new.user_id;
-
- if not found then
- insert into total_history_count_user(user_id, total)
- values (
- new.user_id,
- (select count(1) from history where user_id = new.user_id)
- );
- end if;
- end if;
-
- return NEW; -- this is actually ignored for an after trigger, but oh well
-end;
-$func$
-language plpgsql volatile -- pldfplplpflh
-cost 100; -- default value
-
-create trigger tg_user_history_count
- after insert or delete on history
- for each row
- execute procedure user_history_count();
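
For context on what the trigger buys: the hot read becomes a single-row lookup instead of a full table count. A sketch of the corresponding server-side query, mirroring the count_history_cached method in the deleted database.rs further down (the free-function shape here is illustrative):

    use sqlx::PgPool;

    // Read the maintained counter row rather than running COUNT(1) over
    // every history row for the user.
    pub async fn count_history_cached(pool: &PgPool, user_id: i64) -> sqlx::Result<i64> {
        let (total,): (i32,) =
            sqlx::query_as("select total from total_history_count_user where user_id = $1")
                .bind(user_id)
                .fetch_one(pool)
                .await?;
        Ok(i64::from(total))
    }
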
diff --git a/atuin-server/migrations/20220421073605_fix_count_trigger_delete.sql b/atuin-server/migrations/20220421073605_fix_count_trigger_delete.sql
deleted file mode 100644
index 6198f3001..000000000
--- a/atuin-server/migrations/20220421073605_fix_count_trigger_delete.sql
+++ /dev/null
@@ -1,35 +0,0 @@
--- the old version of this function used NEW in the delete part when it should
--- use OLD
-
-create or replace function user_history_count()
-returns trigger as
-$func$
-begin
- if (TG_OP='INSERT') then
- update total_history_count_user set total = total + 1 where user_id = new.user_id;
-
- if not found then
- insert into total_history_count_user(user_id, total)
- values (
- new.user_id,
- (select count(1) from history where user_id = new.user_id)
- );
- end if;
-
- elsif (TG_OP='DELETE') then
- update total_history_count_user set total = total - 1 where user_id = old.user_id;
-
- if not found then
- insert into total_history_count_user(user_id, total)
- values (
- old.user_id,
- (select count(1) from history where user_id = old.user_id)
- );
- end if;
- end if;
-
- return NEW; -- this is actually ignored for an after trigger, but oh well
-end;
-$func$
-language plpgsql volatile -- pldfplplpflh
-cost 100; -- default value
diff --git a/atuin-server/migrations/20220421174016_larger-commands.sql b/atuin-server/migrations/20220421174016_larger-commands.sql
deleted file mode 100644
index 0ac434339..000000000
--- a/atuin-server/migrations/20220421174016_larger-commands.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- Make it 4x larger. Most commands are less than this, but as it's base64
--- SOME are more than 8192. Should be enough for now.
-ALTER TABLE history ALTER COLUMN data TYPE varchar(32768);
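
The 4x headroom follows from base64's expansion rate: every 3 input bytes become 4 output characters. A quick sanity check (hypothetical helper, just to show the arithmetic):

    // base64 emits 4 characters per 3-byte input group, padding the last group.
    fn base64_len(n_bytes: usize) -> usize {
        ((n_bytes + 2) / 3) * 4
    }

So 8192 bytes of ciphertext encode to 10,924 characters, already over varchar(8192), while varchar(32768) leaves comfortable headroom.
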
diff --git a/atuin-server/migrations/20220426172813_user-created-at.sql b/atuin-server/migrations/20220426172813_user-created-at.sql
deleted file mode 100644
index a9138194e..000000000
--- a/atuin-server/migrations/20220426172813_user-created-at.sql
+++ /dev/null
@@ -1 +0,0 @@
-alter table users add column created_at timestamp not null default now();
diff --git a/atuin-server/migrations/20220505082442_create-events.sql b/atuin-server/migrations/20220505082442_create-events.sql
deleted file mode 100644
index 57e16ec73..000000000
--- a/atuin-server/migrations/20220505082442_create-events.sql
+++ /dev/null
@@ -1,14 +0,0 @@
-create type event_type as enum ('create', 'delete');
-
-create table events (
- id bigserial primary key,
- client_id text not null unique, -- the client-generated ID
- user_id bigserial not null, -- allow multiple users
- hostname text not null, -- a unique identifier from the client (can be hashed, random, whatever)
- timestamp timestamp not null, -- one of the few non-encrypted metadatas
-
- event_type event_type,
- data text not null, -- store the actual history data, encrypted. I don't wanna know!
-
- created_at timestamp not null default current_timestamp
-);
diff --git a/atuin-server/migrations/20220610074049_history-length.sql b/atuin-server/migrations/20220610074049_history-length.sql
deleted file mode 100644
index b1c230167..000000000
--- a/atuin-server/migrations/20220610074049_history-length.sql
+++ /dev/null
@@ -1,2 +0,0 @@
--- Add migration script here
-alter table history alter column data type text;
diff --git a/atuin-server/migrations/20230315220537_drop-events.sql b/atuin-server/migrations/20230315220537_drop-events.sql
deleted file mode 100644
index fe3cae170..000000000
--- a/atuin-server/migrations/20230315220537_drop-events.sql
+++ /dev/null
@@ -1,2 +0,0 @@
--- Add migration script here
-drop table events;
diff --git a/atuin-server/migrations/20230315224203_create-deleted.sql b/atuin-server/migrations/20230315224203_create-deleted.sql
deleted file mode 100644
index 9a9e6263f..000000000
--- a/atuin-server/migrations/20230315224203_create-deleted.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- Add migration script here
-alter table history add column if not exists deleted_at timestamp;
-
--- queries will all be selecting the ids of history for a user, that has been deleted
-create index if not exists history_deleted_index on history(client_id, user_id, deleted_at);
diff --git a/atuin-server/migrations/20230515221038_trigger-delete-only.sql b/atuin-server/migrations/20230515221038_trigger-delete-only.sql
deleted file mode 100644
index 3d0bba528..000000000
--- a/atuin-server/migrations/20230515221038_trigger-delete-only.sql
+++ /dev/null
@@ -1,30 +0,0 @@
--- We do not need to run the trigger on deletes, as the only time we are deleting history is when the user
--- has already been deleted
--- This actually slows down deleting all the history a good bit!
-
-create or replace function user_history_count()
-returns trigger as
-$func$
-begin
- if (TG_OP='INSERT') then
- update total_history_count_user set total = total + 1 where user_id = new.user_id;
-
- if not found then
- insert into total_history_count_user(user_id, total)
- values (
- new.user_id,
- (select count(1) from history where user_id = new.user_id)
- );
- end if;
- end if;
-
- return NEW; -- this is actually ignored for an after trigger, but oh well
-end;
-$func$
-language plpgsql volatile -- pldfplplpflh
-cost 100; -- default value
-
-create or replace trigger tg_user_history_count
- after insert on history
- for each row
- execute procedure user_history_count();
diff --git a/atuin-server/src/auth.rs b/atuin-server/src/auth.rs
deleted file mode 100644
index 52a731087..000000000
--- a/atuin-server/src/auth.rs
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
-use self::diesel::prelude::*;
-use eyre::Result;
-use rocket::http::Status;
-use rocket::request::{self, FromRequest, Outcome, Request};
-use rocket::State;
-use rocket_contrib::databases::diesel;
-use sodiumoxide::crypto::pwhash::argon2id13;
-
-use rocket_contrib::json::Json;
-use uuid::Uuid;
-
-use super::models::{NewSession, NewUser, Session, User};
-use super::views::ApiResponse;
-
-use crate::api::{LoginRequest, RegisterRequest};
-use crate::schema::{sessions, users};
-use crate::settings::Settings;
-use crate::utils::hash_secret;
-
-use super::database::AtuinDbConn;
-
-#[derive(Debug)]
-pub enum KeyError {
- Missing,
- Invalid,
-}
-
-pub fn verify_str(secret: &str, verify: &str) -> bool {
- sodiumoxide::init().unwrap();
-
- let mut padded = [0_u8; 128];
- secret.as_bytes().iter().enumerate().for_each(|(i, val)| {
- padded[i] = *val;
- });
-
- match argon2id13::HashedPassword::from_slice(&padded) {
- Some(hp) => argon2id13::pwhash_verify(&hp, verify.as_bytes()),
- None => false,
- }
-}
-
-impl<'a, 'r> FromRequest<'a, 'r> for User {
- type Error = KeyError;
-
- fn from_request(request: &'a Request<'r>) -> request::Outcome<User, Self::Error> {
- let session: Vec<_> = request.headers().get("authorization").collect();
-
- if session.is_empty() {
- return Outcome::Failure((Status::BadRequest, KeyError::Missing));
- } else if session.len() > 1 {
- return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
- }
-
- let session: Vec<_> = session[0].split(' ').collect();
-
- if session.len() != 2 {
- return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
- }
-
- if session[0] != "Token" {
- return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
- }
-
- let session = session[1];
-
- let db = request
- .guard::<AtuinDbConn>()
- .succeeded()
- .expect("failed to load database");
-
- let session = sessions::table
- .filter(sessions::token.eq(session))
- .first::<Session>(&*db);
-
- if session.is_err() {
- return Outcome::Failure((Status::Unauthorized, KeyError::Invalid));
- }
-
- let session = session.unwrap();
-
- let user = users::table.find(session.user_id).first(&*db);
-
- match user {
- Ok(user) => Outcome::Success(user),
- Err(_) => Outcome::Failure((Status::Unauthorized, KeyError::Invalid)),
- }
- }
-}
-
-#[get("/user/<user>")]
-#[allow(clippy::clippy::needless_pass_by_value)]
-pub fn get_user(user: String, conn: AtuinDbConn) -> ApiResponse {
- use crate::schema::users::dsl::{username, users};
-
- let user: Result<String, diesel::result::Error> = users
- .select(username)
- .filter(username.eq(user))
- .first(&*conn);
-
- if user.is_err() {
- return ApiResponse {
- json: json!({
- "message": "could not find user",
- }),
- status: Status::NotFound,
- };
- }
-
- let user = user.unwrap();
-
- ApiResponse {
- json: json!({ "username": user.as_str() }),
- status: Status::Ok,
- }
-}
-
-#[post("/register", data = "<register>")]
-#[allow(clippy::clippy::needless_pass_by_value)]
-pub fn register(
- conn: AtuinDbConn,
- register: Json<RegisterRequest>,
- settings: State<Settings>,
-) -> ApiResponse {
- if !settings.server.open_registration {
- return ApiResponse {
- status: Status::BadRequest,
- json: json!({
- "message": "registrations are not open"
- }),
- };
- }
-
- let hashed = hash_secret(register.password.as_str());
-
- let new_user = NewUser {
- email: register.email.as_str(),
- username: register.username.as_str(),
- password: hashed.as_str(),
- };
-
- let user = diesel::insert_into(users::table)
- .values(&new_user)
- .get_result(&*conn);
-
- if user.is_err() {
- return ApiResponse {
- status: Status::BadRequest,
- json: json!({
- "message": "failed to create user - username or email in use?",
- }),
- };
- }
-
- let user: User = user.unwrap();
- let token = Uuid::new_v4().to_simple().to_string();
-
- let new_session = NewSession {
- user_id: user.id,
- token: token.as_str(),
- };
-
- match diesel::insert_into(sessions::table)
- .values(&new_session)
- .execute(&*conn)
- {
- Ok(_) => ApiResponse {
- status: Status::Ok,
- json: json!({"message": "user created!", "session": token}),
- },
- Err(_) => ApiResponse {
- status: Status::BadRequest,
- json: json!({ "message": "failed to create user"}),
- },
- }
-}
-
-#[post("/login", data = "<login>")]
-#[allow(clippy::clippy::needless_pass_by_value)]
-pub fn login(conn: AtuinDbConn, login: Json<LoginRequest>) -> ApiResponse {
- let user = users::table
- .filter(users::username.eq(login.username.as_str()))
- .first(&*conn);
-
- if user.is_err() {
- return ApiResponse {
- status: Status::NotFound,
- json: json!({"message": "user not found"}),
- };
- }
-
- let user: User = user.unwrap();
-
- let session = sessions::table
- .filter(sessions::user_id.eq(user.id))
- .first(&*conn);
-
- // a session should exist...
- if session.is_err() {
- return ApiResponse {
- status: Status::InternalServerError,
- json: json!({"message": "something went wrong"}),
- };
- }
-
- let verified = verify_str(user.password.as_str(), login.password.as_str());
-
- if !verified {
- return ApiResponse {
- status: Status::NotFound,
- json: json!({"message": "user not found"}),
- };
- }
-
- let session: Session = session.unwrap();
-
- ApiResponse {
- status: Status::Ok,
- json: json!({"session": session.token}),
- }
-}
-*/
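
The 128-byte padding in verify_str above exists because sodiumoxide's argon2id13::HashedPassword is a fixed 128-byte, NUL-padded buffer. Its hashing counterpart, hash_secret, lives in crate::utils and is not part of this diff; a hypothetical reconstruction based on the sodiumoxide API:

    use sodiumoxide::crypto::pwhash::argon2id13;

    // Hypothetical sketch of crate::utils::hash_secret; not shown in this diff.
    pub fn hash_secret(password: &str) -> String {
        sodiumoxide::init().unwrap();
        let hash = argon2id13::pwhash(
            password.as_bytes(),
            argon2id13::OPSLIMIT_INTERACTIVE,
            argon2id13::MEMLIMIT_INTERACTIVE,
        )
        .unwrap();
        // hash.0 is the full 128-byte buffer; store the textual prefix and drop
        // the trailing NULs that verify_str re-pads before verifying.
        let text = std::str::from_utf8(&hash.0).unwrap();
        text.trim_end_matches('\u{0}').to_string()
    }
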
diff --git a/atuin-server/src/calendar.rs b/atuin-server/src/calendar.rs
deleted file mode 100644
index 7c05dce38..000000000
--- a/atuin-server/src/calendar.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-// Calendar data
-
-use serde::{Deserialize, Serialize};
-
-pub enum TimePeriod {
- YEAR,
- MONTH,
- DAY,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct TimePeriodInfo {
- pub count: u64,
-
- // TODO: Use this for merkle tree magic
- pub hash: String,
-}
diff --git a/atuin-server/src/database.rs b/atuin-server/src/database.rs
deleted file mode 100644
index 894fab7bd..000000000
--- a/atuin-server/src/database.rs
+++ /dev/null
@@ -1,510 +0,0 @@
-use std::collections::HashMap;
-
-use async_trait::async_trait;
-use chrono::{Datelike, TimeZone};
-use chronoutil::RelativeDuration;
-use sqlx::{postgres::PgPoolOptions, Result};
-
-use sqlx::Row;
-
-use tracing::{debug, instrument, warn};
-
-use super::{
- calendar::{TimePeriod, TimePeriodInfo},
- models::{History, NewHistory, NewSession, NewUser, Session, User},
-};
-use crate::settings::Settings;
-
-use atuin_common::utils::get_days_from_month;
-
-#[async_trait]
-pub trait Database {
- async fn get_session(&self, token: &str) -> Result<Session>;
- async fn get_session_user(&self, token: &str) -> Result<User>;
- async fn add_session(&self, session: &NewSession) -> Result<()>;
-
- async fn get_user(&self, username: &str) -> Result<User>;
- async fn get_user_session(&self, u: &User) -> Result<Session>;
- async fn add_user(&self, user: &NewUser) -> Result<i64>;
- async fn delete_user(&self, u: &User) -> Result<()>;
-
- async fn count_history(&self, user: &User) -> Result<i64>;
- async fn count_history_cached(&self, user: &User) -> Result<i64>;
-
- async fn delete_history(&self, user: &User, id: String) -> Result<()>;
- async fn deleted_history(&self, user: &User) -> Result<Vec<String>>;
-
- async fn count_history_range(
- &self,
- user: &User,
- start: chrono::NaiveDateTime,
- end: chrono::NaiveDateTime,
- ) -> Result<i64>;
- async fn count_history_day(&self, user: &User, date: chrono::NaiveDate) -> Result<i64>;
- async fn count_history_month(&self, user: &User, date: chrono::NaiveDate) -> Result<i64>;
- async fn count_history_year(&self, user: &User, year: i32) -> Result<i64>;
-
- async fn list_history(
- &self,
- user: &User,
- created_after: chrono::NaiveDateTime,
- since: chrono::NaiveDateTime,
- host: &str,
- page_size: i64,
- ) -> Result<Vec<History>>;
-
- async fn add_history(&self, history: &[NewHistory]) -> Result<()>;
-
- async fn oldest_history(&self, user: &User) -> Result<History>;
-
- async fn calendar(
- &self,
- user: &User,
- period: TimePeriod,
- year: u64,
- month: u64,
- ) -> Result<HashMap<u64, TimePeriodInfo>>;
-}
-
-#[derive(Clone)]
-pub struct Postgres {
- pool: sqlx::Pool<sqlx::postgres::Postgres>,
- settings: Settings,
-}
-
-impl Postgres {
- pub async fn new(settings: Settings) -> Result<Self> {
- let pool = PgPoolOptions::new()
- .max_connections(100)
- .connect(settings.db_uri.as_str())
- .await?;
-
- sqlx::migrate!("./migrations").run(&pool).await?;
-
- Ok(Self { pool, settings })
- }
-}
-
-#[async_trait]
-impl Database for Postgres {
- #[instrument(skip_all)]
- async fn get_session(&self, token: &str) -> Result<Session> {
- sqlx::query_as::<_, Session>("select id, user_id, token from sessions where token = $1")
- .bind(token)
- .fetch_one(&self.pool)
- .await
- }
-
- #[instrument(skip_all)]
- async fn get_user(&self, username: &str) -> Result<User> {
- sqlx::query_as::<_, User>(
- "select id, username, email, password from users where username = $1",
- )
- .bind(username)
- .fetch_one(&self.pool)
- .await
- }
-
- #[instrument(skip_all)]
- async fn get_session_user(&self, token: &str) -> Result<User> {
- sqlx::query_as::<_, User>(
- "select users.id, users.username, users.email, users.password from users
- inner join sessions
- on users.id = sessions.user_id
- and sessions.token = $1",
- )
- .bind(token)
- .fetch_one(&self.pool)
- .await
- }
-
- #[instrument(skip_all)]
- async fn count_history(&self, user: &User) -> Result<i64> {
- // The cache is new, and the user might not yet have a cache value.
- // They will have one as soon as they post up some new history, but handle that
- // edge case.
-
- let res: (i64,) = sqlx::query_as(
- "select count(1) from history
- where user_id = $1",
- )
- .bind(user.id)
- .fetch_one(&self.pool)
- .await?;
-
- Ok(res.0)
- }
-
- #[instrument(skip_all)]
- async fn count_history_cached(&self, user: &User) -> Result<i64> {
- let res: (i32,) = sqlx::query_as(
- "select total from total_history_count_user
- where user_id = $1",
- )
- .bind(user.id)
- .fetch_one(&self.pool)
- .await?;
-
- Ok(res.0 as i64)
- }
-
- async fn delete_history(&self, user: &User, id: String) -> Result<()> {
- sqlx::query(
- "update history
- set deleted_at = $3
- where user_id = $1
- and client_id = $2
- and deleted_at is null", // don't just keep setting it
- )
- .bind(user.id)
- .bind(id)
- .bind(chrono::Utc::now().naive_utc())
- .fetch_all(&self.pool)
- .await?;
-
- Ok(())
- }
-
- #[instrument(skip_all)]
- async fn deleted_history(&self, user: &User) -> Result<Vec<String>> {
- // The cache is new, and the user might not yet have a cache value.
- // They will have one as soon as they post up some new history, but handle that
- // edge case.
-
- let res = sqlx::query(
- "select client_id from history
- where user_id = $1
- and deleted_at is not null",
- )
- .bind(user.id)
- .fetch_all(&self.pool)
- .await?;
-
- let res = res
- .iter()
- .map(|row| row.get::<String, _>("client_id"))
- .collect();
-
- Ok(res)
- }
-
- #[instrument(skip_all)]
- async fn count_history_range(
- &self,
- user: &User,
- start: chrono::NaiveDateTime,
- end: chrono::NaiveDateTime,
- ) -> Result<i64> {
- let res: (i64,) = sqlx::query_as(
- "select count(1) from history
- where user_id = $1
- and timestamp >= $2::date
- and timestamp < $3::date",
- )
- .bind(user.id)
- .bind(start)
- .bind(end)
- .fetch_one(&self.pool)
- .await?;
-
- Ok(res.0)
- }
-
- // Count the history for a given year
- #[instrument(skip_all)]
- async fn count_history_year(&self, user: &User, year: i32) -> Result<i64> {
- let start = chrono::Utc.ymd(year, 1, 1).and_hms_nano(0, 0, 0, 0);
- let end = start + RelativeDuration::years(1);
-
- let res = self
- .count_history_range(user, start.naive_utc(), end.naive_utc())
- .await?;
- Ok(res)
- }
-
- // Count the history for a given month
- #[instrument(skip_all)]
- async fn count_history_month(&self, user: &User, month: chrono::NaiveDate) -> Result<i64> {
- let start = chrono::Utc
- .ymd(month.year(), month.month(), 1)
- .and_hms_nano(0, 0, 0, 0);
-
- // ofc...
- let end = if month.month() < 12 {
- chrono::Utc
- .ymd(month.year(), month.month() + 1, 1)
- .and_hms_nano(0, 0, 0, 0)
- } else {
- chrono::Utc
- .ymd(month.year() + 1, 1, 1)
- .and_hms_nano(0, 0, 0, 0)
- };
-
- debug!("start: {}, end: {}", start, end);
-
- let res = self
- .count_history_range(user, start.naive_utc(), end.naive_utc())
- .await?;
- Ok(res)
- }
-
- // Count the history for a given day
- #[instrument(skip_all)]
- async fn count_history_day(&self, user: &User, day: chrono::NaiveDate) -> Result<i64> {
- let start = chrono::Utc
- .ymd(day.year(), day.month(), day.day())
- .and_hms_nano(0, 0, 0, 0);
- let end = chrono::Utc
- .ymd(day.year(), day.month(), day.day() + 1)
- .and_hms_nano(0, 0, 0, 0);