diff options
author | Matthias Beyer <mail@beyermatthias.de> | 2020-11-14 14:37:20 +0100 |
---|---|---|
committer | Matthias Beyer <mail@beyermatthias.de> | 2020-11-14 18:18:27 +0100 |
commit | 056a2c66e3d3fc8c4b6803acb9d151673b78a23f (patch) | |
tree | 532f8970c61f147af3dcde496953ef1a50844b0a | |
parent | 2de687ac49c1d9aa48334d9cc0550f65ddfc96f3 (diff) |
Add debug output and error context information
Signed-off-by: Matthias Beyer <mail@beyermatthias.de>
-rw-r--r-- | src/endpoint/configured.rs | 31 | ||||
-rw-r--r-- | src/endpoint/scheduler.rs | 4 | ||||
-rw-r--r-- | src/orchestrator/orchestrator.rs | 5 | ||||
-rw-r--r-- | src/source/mod.rs | 1 |
4 files changed, 24 insertions, 17 deletions
diff --git a/src/endpoint/configured.rs b/src/endpoint/configured.rs index 05ad8a3..23b3069 100644 --- a/src/endpoint/configured.rs +++ b/src/endpoint/configured.rs @@ -147,7 +147,7 @@ impl Endpoint { .into_iter() .map(ImageName::from) }) - .flatten() + .flatten() .collect::<Vec<ImageName>>(); trace!("Available images = {:?}", available_names); @@ -160,7 +160,7 @@ impl Endpoint { Ok(()) } }) - .collect::<Result<Vec<_>>>() + .collect::<Result<Vec<_>>>() .map(|_| ()) } @@ -175,18 +175,21 @@ impl Endpoint { .filter_map(JobResource::env) .map(|(k, v)| format!("{}={}", k, v)) .collect::<Vec<_>>(); + trace!("Job resources: Environment variables = {:?}", envs); let builder_opts = shiplift::ContainerOptions::builder(job.image().as_ref()) - .env(envs.iter().map(AsRef::as_ref).collect()) - .cmd(vec!["/bin/bash"]) // we start the container with /bin/bash, but exec() the script in it later - .attach_stdin(true) // we have to attach, otherwise bash exits - .build(); + .env(envs.iter().map(AsRef::as_ref).collect()) + .cmd(vec!["/bin/bash"]) // we start the container with /bin/bash, but exec() the script in it later + .attach_stdin(true) // we have to attach, otherwise bash exits + .build(); + trace!("Builder options = {:?}", builder_opts); let create_info = self.docker .containers() .create(&builder_opts) .await .with_context(|| anyhow!("Creating container on '{}'", self.name))?; + trace!("Create info = {:?}", create_info); if let Some(warnings) = create_info.warnings.as_ref() { for warning in warnings { @@ -203,8 +206,10 @@ impl Endpoint { .attach_stderr(true) .attach_stdout(true) .build(); + trace!("Exec options = {:?}", exec_opts); let container = self.docker.containers().get(&container_id); + trace!("Container id = {:?}", container_id); { // copy source to container use tokio::io::AsyncReadExt; @@ -213,6 +218,9 @@ impl Endpoint { let destination = PathBuf::from("/inputs").join({ source_path.file_name().ok_or_else(|| anyhow!("Not a file: {}", source_path.display()))? 
}); + trace!("Package source = {:?}", pkgsource); + trace!("Source path = {:?}", source_path); + trace!("Source dest = {:?}", destination); let mut buf = vec![]; tokio::fs::OpenOptions::new() .create(false) @@ -221,7 +229,8 @@ impl Endpoint { .write(false) .read(true) .open(source_path) - .await? + .await + .with_context(|| anyhow!("Getting source file: {}", source_path.display()))? .read_to_end(&mut buf) .await?; @@ -248,10 +257,10 @@ impl Endpoint { .await .map_err(Error::from) }) - .collect::<futures::stream::FuturesUnordered<_>>() + .collect::<futures::stream::FuturesUnordered<_>>() .collect::<Result<Vec<_>>>() .await?; - } + } container .copy_file_into(script_path, job.script().as_ref().as_bytes()) @@ -280,9 +289,9 @@ impl Endpoint { }) }) }) - .collect::<Result<Vec<_>>>() + .collect::<Result<Vec<_>>>() }) - .inspect(|r| { trace!("Fetching log from container {} -> {:?}", container_id, r); }) + .inspect(|r| { trace!("Fetching log from container {} -> {:?}", container_id, r); }) .map(|r| r.with_context(|| anyhow!("Fetching log from container {} on {}", container_id, self.name))) .await?; diff --git a/src/endpoint/scheduler.rs b/src/endpoint/scheduler.rs index b039c71..3c2ebee 100644 --- a/src/endpoint/scheduler.rs +++ b/src/endpoint/scheduler.rs @@ -66,11 +66,11 @@ impl EndpointScheduler { /// # Warning /// /// This function blocks as long as there is no free endpoint available! 
- pub async fn schedule_job(&self, job: RunnableJob, multibar: &indicatif::MultiProgress) -> Result<JobHandle> { + pub async fn schedule_job(&self, job: RunnableJob) -> Result<JobHandle> { let endpoint = self.select_free_endpoint().await?; Ok(JobHandle { - bar: multibar.add(self.progressbars.job_bar(job.uuid())), + bar: self.progressbars.job_bar(job.uuid()), endpoint, job, staging_store: self.staging_store.clone(), diff --git a/src/orchestrator/orchestrator.rs b/src/orchestrator/orchestrator.rs index 6d8ff11..33dcd1f 100644 --- a/src/orchestrator/orchestrator.rs +++ b/src/orchestrator/orchestrator.rs @@ -75,8 +75,6 @@ impl Orchestrator { let number_of_jobsets = self.jobsets.len(); let database = self.database; - let multibar = indicatif::MultiProgress::new(); - for (i, jobset) in self.jobsets.into_iter().enumerate() { let merged_store = MergedStores::new(self.release_store.clone(), self.staging_store.clone()); @@ -87,7 +85,7 @@ impl Orchestrator { let job_id = runnable.uuid().clone(); trace!("Runnable {} for package {}", job_id, runnable.package().name()); - let jobhandle = self.scheduler.schedule_job(runnable, &multibar).await?; + let jobhandle = self.scheduler.schedule_job(runnable).await?; trace!("Jobhandle -> {:?}", jobhandle); // clone the bar here, so we can give a handle to the async result fetcher closure @@ -127,7 +125,6 @@ impl Orchestrator { let mut results = results; // rebind! report_result.append(&mut results); } - multibar.join()?; Ok(report_result) } diff --git a/src/source/mod.rs b/src/source/mod.rs index 31777dc..162c787 100644 --- a/src/source/mod.rs +++ b/src/source/mod.rs @@ -25,6 +25,7 @@ impl SourceCache { } } +#[derive(Debug)] pub struct SourceEntry { cache_root: PathBuf, package_name: PackageName, |