author     Matthias Beyer <matthias.beyer@atos.net>    2021-02-22 10:44:19 +0100
committer  Matthias Beyer <mail@beyermatthias.de>      2021-02-25 10:40:16 +0100
commit     fe5b97425fa08854b4f1ce37451166f8a81d54d2 (patch)
tree       01f3c7f85b4599e16fff44425c93789b825aeccc /src/endpoint/scheduler.rs
parent     df1ab6c67de7591f849b14b8bdd94aadfc8fe961 (diff)
Multiple release stores
This patch adds the ability to have more than one release store. With this patch, a user can (in fact, has to) configure release store names in the configuration file, and can then specify one of the configured names to release the artifacts to. This way, different release "channels" can be served, for example a stable channel and a rolling release channel (although "channel" is not part of our wording).

The code was adapted to fetch releases from multiple release directories in the crate::db::find_artifact implementation, so that re-using artifacts works across all release directories.

Signed-off-by: Matthias Beyer <matthias.beyer@atos.net>
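The lookup across multiple release directories could look roughly like the sketch below. The type layout (a ReleaseStore holding a root path, a contains helper, and the free function find_in_release_stores) is illustrative only and not the crate's actual API:

    use std::path::{Path, PathBuf};
    use std::sync::Arc;

    /// Illustrative stand-in for the crate's release store type.
    struct ReleaseStore {
        root: PathBuf,
    }

    impl ReleaseStore {
        /// Does this release directory already hold the artifact?
        fn contains(&self, artifact: &Path) -> bool {
            self.root.join(artifact).exists()
        }
    }

    /// Scan every configured release store and return the first one that
    /// already holds the artifact, so artifact re-use works across all
    /// release directories.
    fn find_in_release_stores(
        stores: &[Arc<ReleaseStore>],
        artifact: &Path,
    ) -> Option<Arc<ReleaseStore>> {
        stores.iter().find(|store| store.contains(artifact)).cloned()
    }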
Diffstat (limited to 'src/endpoint/scheduler.rs')
-rw-r--r--    src/endpoint/scheduler.rs    12
1 file changed, 6 insertions(+), 6 deletions(-)
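For context on the caller side, the scheduler now expects a Vec<Arc<ReleaseStore>>, which could be built by resolving the names requested by the user against the stores declared in the configuration file. Again a sketch under assumptions: the map-based configuration, the String error type, and resolve_release_stores are hypothetical and not the crate's actual code:

    use std::collections::HashMap;
    use std::path::PathBuf;
    use std::sync::Arc;

    /// Illustrative stand-in for the crate's release store type.
    struct ReleaseStore {
        root: PathBuf,
    }

    /// Resolve requested release store names against the ones declared in
    /// the configuration file, failing on any unknown name.
    fn resolve_release_stores(
        configured: &HashMap<String, PathBuf>,
        requested: &[String],
    ) -> Result<Vec<Arc<ReleaseStore>>, String> {
        requested
            .iter()
            .map(|name| {
                configured
                    .get(name)
                    .map(|root| Arc::new(ReleaseStore { root: root.clone() }))
                    .ok_or_else(|| format!("Unknown release store: {}", name))
            })
            .collect()
    }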
diff --git a/src/endpoint/scheduler.rs b/src/endpoint/scheduler.rs
index db0b826..826f905 100644
--- a/src/endpoint/scheduler.rs
+++ b/src/endpoint/scheduler.rs
@@ -42,7 +42,7 @@ pub struct EndpointScheduler {
endpoints: Vec<Arc<RwLock<Endpoint>>>,
staging_store: Arc<RwLock<StagingStore>>,
- release_store: Arc<RwLock<ReleaseStore>>,
+ release_stores: Vec<Arc<ReleaseStore>>,
db: Arc<PgConnection>,
submit: crate::db::models::Submit,
}
@@ -51,7 +51,7 @@ impl EndpointScheduler {
pub async fn setup(
endpoints: Vec<EndpointConfiguration>,
staging_store: Arc<RwLock<StagingStore>>,
- release_store: Arc<RwLock<ReleaseStore>>,
+ release_stores: Vec<Arc<ReleaseStore>>,
db: Arc<PgConnection>,
submit: crate::db::models::Submit,
log_dir: Option<PathBuf>,
@@ -62,7 +62,7 @@ impl EndpointScheduler {
log_dir,
endpoints,
staging_store,
- release_store,
+ release_stores,
db,
submit,
})
@@ -99,7 +99,7 @@ impl EndpointScheduler {
endpoint,
job,
staging_store: self.staging_store.clone(),
- release_store: self.release_store.clone(),
+ release_stores: self.release_stores.clone(),
db: self.db.clone(),
submit: self.submit.clone(),
})
@@ -142,7 +142,7 @@ pub struct JobHandle {
bar: ProgressBar,
db: Arc<PgConnection>,
staging_store: Arc<RwLock<StagingStore>>,
- release_store: Arc<RwLock<ReleaseStore>>,
+ release_stores: Vec<Arc<ReleaseStore>>,
submit: crate::db::models::Submit,
}
@@ -163,7 +163,7 @@ impl JobHandle {
let job_id = *self.job.uuid();
trace!("Running on Job {} on Endpoint {}", job_id, ep.name());
let prepared_container = ep
- .prepare_container(self.job, self.staging_store.clone(), self.release_store.clone())
+ .prepare_container(self.job, self.staging_store.clone(), self.release_stores.clone())
.await?;
let container_id = prepared_container.create_info().id.clone();
let running_container = prepared_container