1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
|
use std::path::PathBuf;
use std::sync::RwLock;
use std::sync::Arc;
use std::collections::BTreeMap;
use anyhow::anyhow;
use anyhow::Error;
use anyhow::Context;
use anyhow::Result;
use typed_builder::TypedBuilder;
use diesel::PgConnection;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
use resiter::AndThen;
use crate::endpoint::EndpointConfiguration;
use crate::endpoint::EndpointScheduler;
use crate::job::JobSet;
use crate::job::RunnableJob;
use crate::log::LogItem;
use crate::filestore::StagingStore;
use crate::filestore::ReleaseStore;
use crate::log::FileLogSinkFactory;
use crate::log::LogSink;
use crate::db::models::Submit;
use crate::db::models::EnvVar;
use crate::job::JobResource;
use crate::filestore::MergedStores;
/// Drives a full build run: schedules every [`JobSet`]'s jobs onto the
/// configured endpoints and verifies the produced artifacts afterwards.
///
/// Constructed via [`OrchestratorSetup::setup`]; consumed by [`Orchestrator::run`].
pub struct Orchestrator {
/// Dispatches runnable jobs to the remote endpoints (see `run()`).
scheduler: EndpointScheduler,
/// Store receiving build artifacts; `run()` checks result paths against it.
staging_store: Arc<RwLock<StagingStore>>,
/// Released-artifact store; merged with the staging store per jobset.
release_store: Arc<RwLock<ReleaseStore>>,
/// The ordered sets of jobs to execute, one set at a time.
jobsets: Vec<JobSet>,
/// Database connection. NOTE(review): currently unused in `run()` (bound to
/// `_database`) — presumably intended for persisting results; confirm.
database: PgConnection,
/// Optional factory for per-job file log sinks.
/// NOTE(review): not used anywhere in the visible code — verify intent.
file_log_sink_factory: Option<FileLogSinkFactory>,
}
/// Builder-constructed bag of everything needed to create an [`Orchestrator`].
///
/// Use the generated builder (via `TypedBuilder`) to populate the fields,
/// then call [`OrchestratorSetup::setup`] to perform the async setup work.
#[derive(TypedBuilder)]
pub struct OrchestratorSetup {
/// Configuration for each endpoint the scheduler should talk to.
endpoint_config: Vec<EndpointConfiguration>,
/// Staging store handed to both the scheduler and the orchestrator.
staging_store: Arc<RwLock<StagingStore>>,
/// Release store, forwarded to the orchestrator unchanged.
release_store: Arc<RwLock<ReleaseStore>>,
/// Job sets to be executed by the orchestrator.
jobsets: Vec<JobSet>,
/// Database connection, forwarded to the orchestrator unchanged.
database: PgConnection,
/// The submit this run belongs to.
/// NOTE(review): not consumed by `setup()` in the visible code — confirm
/// whether it should be passed on to the `Orchestrator`.
submit: Submit,
/// Optional log-sink factory, forwarded to the orchestrator unchanged.
file_log_sink_factory: Option<FileLogSinkFactory>,
}
impl OrchestratorSetup {
    /// Consume this setup and construct a ready-to-run [`Orchestrator`].
    ///
    /// Sets up the [`EndpointScheduler`] from the configured endpoints
    /// (sharing the staging store with it); all other fields are moved
    /// over unchanged.
    ///
    /// # Errors
    ///
    /// Returns an error if the endpoint scheduler fails to set up.
    pub async fn setup(self) -> Result<Orchestrator> {
        let scheduler = EndpointScheduler::setup(self.endpoint_config, self.staging_store.clone()).await?;

        Ok(Orchestrator {
            // Field-init shorthand instead of the redundant `scheduler: scheduler`.
            scheduler,
            staging_store: self.staging_store,
            release_store: self.release_store,
            jobsets: self.jobsets,
            database: self.database,
            // NOTE(review): `self.submit` is dropped here — confirm whether the
            // orchestrator should carry it.
            file_log_sink_factory: self.file_log_sink_factory,
        })
    }
}
impl Orchestrator {
    /// Execute all job sets sequentially; the jobs *within* a set run
    /// concurrently on the scheduler's endpoints.
    ///
    /// For each set, every runnable job is scheduled with its own log
    /// channel; results and log receivers are awaited together via
    /// `tokio::join!`. After a set completes, each reported artifact path
    /// is verified to actually exist in the staging store.
    ///
    /// # Errors
    ///
    /// Returns an error if a job cannot be made runnable or scheduled, if a
    /// job fails, if the staging-store lock is poisoned, or if a reported
    /// result path is missing from the staging store.
    pub async fn run(self) -> Result<()> {
        use tokio::stream::StreamExt;

        // NOTE(review): the database connection is currently unused here —
        // bound to `_database` to silence the warning; presumably results
        // should eventually be persisted. Confirm.
        let _database = self.database;

        for jobset in self.jobsets.into_iter() {
            let merged_store = MergedStores::new(self.release_store.clone(), self.staging_store.clone());

            let (results, logs) = { // run the jobs in the set
                let unordered_results = futures::stream::FuturesUnordered::new();
                let unordered_receivers = futures::stream::FuturesUnordered::new();
                for runnable in jobset.into_runables(&merged_store) {
                    let runnable = runnable?;
                    trace!("Runnable {} for package {}", runnable.uuid(), runnable.package().name());

                    // One log channel per job; the sender goes to the job,
                    // the receiver is collected below.
                    let (sender, receiver) = tokio::sync::mpsc::unbounded_channel::<LogItem>();
                    let jobhandle = self.scheduler.schedule_job(runnable, sender).await?;
                    trace!("Jobhandle -> {:?}", jobhandle);

                    unordered_results.push(async move {
                        jobhandle.get_result().await
                    });

                    // `FuturesUnordered` holds futures, so wrap the receiver
                    // in a trivial future that immediately yields it.
                    unordered_receivers.push(async move { receiver });
                }

                // Collecting into `Result<Vec<_>>` short-circuits on the
                // first failed job.
                (unordered_results.collect::<Result<Vec<_>>>(), unordered_receivers.collect::<Vec<_>>())
            };

            let (results, _logs) = tokio::join!(results, logs);
            // TODO: Use logs.

            // Flatten the per-job artifact path lists into one list.
            let results = results?
                .into_iter()
                .flatten()
                .collect::<Vec<PathBuf>>();

            { // check if all paths that were written are actually there in the staging store
                let staging_store_lock = self.staging_store
                    .read()
                    .map_err(|_| anyhow!("Lock Poisoned"))?;

                trace!("Checking results...");
                for path in results.iter() {
                    trace!("Checking path: {}", path.display());
                    // `path` is already a reference; no extra `&` needed.
                    if !staging_store_lock.path_exists_in_store_root(path) {
                        // `.with_context()` already yields an `anyhow::Error`,
                        // so the former `.map_err(Error::from)` was a no-op
                        // and has been dropped.
                        return Err(anyhow!("Result path {} is missing from staging store", path.display()))
                            .with_context(|| anyhow!("Should be: {}/{}", staging_store_lock.root_path().display(), path.display()))
                    }
                }
            }
        }

        Ok(())
    }
}
|